/src/gmp/mpn/sec_invert.c
/* mpn_sec_invert

   Contributed to the GNU project by Niels Möller

Copyright 2013 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"

#if 0
/* Currently unused. Should be resurrected once mpn_cnd_neg is
   advertised. */
static mp_size_t
mpn_cnd_neg_itch (mp_size_t n)
{
  return n;
}
#endif

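/* Conditionally negate: when cnd is non-zero, {rp, n} gets -{ap, n}
   mod B^n, computed branch-free as ap - 2*ap; when cnd is zero,
   {rp, n} is a plain copy of {ap, n}. */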
/* FIXME: Ought to return carry */
static void
mpn_cnd_neg (int cnd, mp_limb_t *rp, const mp_limb_t *ap, mp_size_t n,
             mp_ptr scratch)
{
  mpn_lshift (scratch, ap, n, 1);
  mpn_cnd_sub_n (cnd, rp, ap, scratch, n);
}

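/* Return 1 if {ap, n} equals the single limb b, otherwise 0. Every
   limb is read unconditionally, so the memory access pattern does not
   depend on the data; only the returned value reveals the outcome. */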
static int
mpn_sec_eq_ui (mp_srcptr ap, mp_size_t n, mp_limb_t b)
{
  mp_limb_t d;
  ASSERT (n > 0);

  d = ap[0] ^ b;

  while (--n > 0)
    d |= ap[n];

  return d == 0;
}

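/* The 4*n scratch limbs are laid out as four n-limb areas: the first
   is working space handed to mpn_cnd_neg and mpn_sec_add_1, the other
   three hold bp, up and m1hp (see the #defines in mpn_sec_invert). */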
mp_size_t
mpn_sec_invert_itch (mp_size_t n)
{
  return 4*n;
}

/* Compute V <-- A^{-1} (mod M), in data-independent time. M must be
   odd. Returns 1 on success, and 0 on failure (i.e., if gcd (A, M) !=
   1). Inputs and outputs of size n, and no overlap allowed. The {ap,
   n} area is destroyed. For arbitrary inputs, bit_size should be
   2*n*GMP_NUMB_BITS, but if A or M are known to be smaller, e.g., if
   M = 2^521 - 1 and A < M, bit_size can be any bound on the sum of
   the bit sizes of A and M. */
int
mpn_sec_invert (mp_ptr vp, mp_ptr ap, mp_srcptr mp,
                mp_size_t n, mp_bitcnt_t bit_size,
                mp_ptr scratch)
{
  ASSERT (n > 0);
  ASSERT (bit_size > 0);
  ASSERT (mp[0] & 1);
  ASSERT (! MPN_OVERLAP_P (ap, n, vp, n));
#define bp (scratch + n)
#define up (scratch + 2*n)
#define m1hp (scratch + 3*n)

  /* Maintain

       a = u * orig_a (mod m)
       b = v * orig_a (mod m)

     and b odd at all times. Initially,

       a = orig_a, u = 1
       b = m,      v = 0
   */

  up[0] = 1;
  mpn_zero (up+1, n - 1);
  mpn_copyi (bp, mp, n);
  mpn_zero (vp, n);

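  /* m1hp = (m+1)/2. Since m is odd, adding m1hp after a one-bit right
     shift of u divides u by 2 modulo m: for odd u,
     (u >> 1) + (m+1)/2 == (u + m)/2, and 2*((u + m)/2) == u (mod m). */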
  ASSERT_CARRY (mpn_rshift (m1hp, mp, n, 1));
  ASSERT_NOCARRY (mpn_sec_add_1 (m1hp, m1hp, n, 1, scratch));

  while (bit_size-- > 0)
    {
      mp_limb_t odd, swap, cy;

      /* Always maintain b odd. The logic of the iteration is as
         follows. For a, b:

           odd = a & 1
           a -= odd * b
           if (underflow from a-b)
             {
               b += a   (so b gets the old value of a)
               a = B^n-a
             }

           a /= 2

         For u, v:

           if (underflow from a - b)
             swap u, v
           u -= odd * v
           if (underflow from u - v)
             u += m

           u /= 2
           if (a one-bit was shifted out)
             u += (m+1)/2

         As long as a > 0, the quantity

           (bitsize of a) + (bitsize of b)

         is reduced by at least one bit per iteration, hence after
         (bitsize of orig_a) + (bitsize of m) - 1 iterations we surely
         have a = 0. Then b = gcd(orig_a, m), and if b = 1 then also
         v = orig_a^{-1} (mod m).
      */

      ASSERT (bp[0] & 1);
      odd = ap[0] & 1;

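      /* If a is odd, a -= b. On borrow (old a < b), b += a puts the
         old a into b, and negating a gives old_b - old_a, so a stays
         nonnegative and b stays odd. */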
      swap = mpn_cnd_sub_n (odd, ap, ap, bp, n);
      mpn_cnd_add_n (swap, bp, bp, ap, n);
      mpn_cnd_neg (swap, ap, ap, n, scratch);

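      /* Mirror those steps on the cofactors: swap u and v on the same
         condition, conditionally subtract v from u, and add back m on
         borrow so that u stays reduced modulo m. */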
      mpn_cnd_swap (swap, up, vp, n);
      cy = mpn_cnd_sub_n (odd, up, up, vp, n);
      cy -= mpn_cnd_add_n (cy, up, up, mp, n);
      ASSERT (cy == 0);

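      /* Halve a (now even), and halve u modulo m: shift u right by one
         bit and add (m+1)/2 if a one-bit was shifted out. */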
      cy = mpn_rshift (ap, ap, n, 1);
      ASSERT (cy == 0);
      cy = mpn_rshift (up, up, n, 1);
      cy = mpn_cnd_add_n (cy, up, up, m1hp, n);
      ASSERT (cy == 0);
    }
  /* Should be all zeros, but check only extreme limbs */
  ASSERT ( (ap[0] | ap[n-1]) == 0);
  /* Check if indeed gcd == 1. */
  return mpn_sec_eq_ui (bp, n, 1);
#undef bp
#undef up
#undef m1hp
}
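
/* Illustrative sketch, not part of the library: a hypothetical helper
   showing how a caller might drive mpn_sec_invert, assuming {ap, n}
   and {mp, n} are already filled in, mp is odd, and the TMP_*
   allocation macros from gmp-impl.h are available. The generic bound
   2*n*GMP_NUMB_BITS is used for bit_size, as described above. */
#if 0
static int
example_sec_invert (mp_ptr vp, mp_ptr ap, mp_srcptr mp, mp_size_t n)
{
  mp_ptr scratch;
  int ok;
  TMP_DECL;

  TMP_MARK;
  scratch = TMP_ALLOC_LIMBS (mpn_sec_invert_itch (n));

  /* {ap, n} is destroyed; on success, {vp, n} holds the inverse.  */
  ok = mpn_sec_invert (vp, ap, mp, n,
                       2 * (mp_bitcnt_t) n * GMP_NUMB_BITS, scratch);

  TMP_FREE;
  return ok;
}
#endif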