/src/gmp/mpn/toom42_mul.c
/* mpn_toom42_mul -- Multiply {ap,an} and {bp,bn} where an is nominally twice
   as large as bn.  Or more accurately, (3/2)bn < an < 4bn.

   Contributed to the GNU project by Torbjorn Granlund.
   Additional improvements by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

   Copyright 2006-2008, 2012, 2014 Free Software Foundation, Inc.

   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free Software
       Foundation; either version 2 of the License, or (at your option) any
       later version.

   or both in parallel, as here.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received copies of the GNU General Public License and the
   GNU Lesser General Public License along with the GNU MP Library.  If not,
   see https://www.gnu.org/licenses/.  */


#include "gmp-impl.h"

/* Evaluate in: -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
               |_b1_|___b0_|
               <-t--><--n-->

  v0  =  a0             * b0      #   A(0)*B(0)
  v1  = (a0+ a1+ a2+ a3)*(b0+ b1) #   A(1)*B(1)      ah  <= 3   bh <= 1
  vm1 = (a0- a1+ a2- a3)*(b0- b1) #  A(-1)*B(-1)    |ah| <= 1   bh  = 0
  v2  = (a0+2a1+4a2+8a3)*(b0+2b1) #   A(2)*B(2)      ah  <= 14  bh <= 2
  vinf=              a3 *     b1  # A(inf)*B(inf)
*/
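/* Writing the full product as C(x) = A(x)*B(x) = c4*x^4 + ... + c1*x + c0,
   the five values above pin down the five coefficients.  One valid recovery
   sequence (a sketch; the exact operation order used by
   mpn_toom_interpolate_5pts may differ):

     c0 = v0
     c4 = vinf
     (v1 + vm1)/2 = c0 + c2 + c4                ->  c2
     (v1 - vm1)/2 = c1 + c3
     (v2 - v0)/2  = c1 + 2*c2 + 4*c3 + 8*c4
     ((v2 - v0)/2 - (c1 + c3) - 2*c2 - 8*c4)/3  ->  c3, then c1

   The final result is C(W) with W = 2^(n*GMP_NUMB_BITS), the implicit
   evaluation point of the limb split.  */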

#define TOOM42_MUL_N_REC(p, a, b, n, ws)				\
  do {									\
    mpn_mul_n (p, a, b, n);						\
  } while (0)
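/* All recursive n x n products are funneled through this macro, presumably
   so a tuned build can substitute a different product routine; the ws
   scratch argument is accepted for that eventuality but currently unused.  */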

void
mpn_toom42_mul (mp_ptr pp,
		mp_srcptr ap, mp_size_t an,
		mp_srcptr bp, mp_size_t bn,
		mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, vinf0;
  mp_ptr a0_a2;
  mp_ptr as1, asm1, as2;
  mp_ptr bs1, bsm1, bs2;
  mp_ptr tmp;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define b0  bp
#define b1  (bp + n)

  n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;
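  /* So an = 3*n + s and bn = n + t with 0 < s, t <= n (checked below): a
     splits into three full n-limb pieces plus a short top piece a3 of s
     limbs, and b into one full piece plus a short top piece b1 of t
     limbs.  */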
  s = an - 3 * n;
  t = bn - n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);
  TMP_MARK;

  tmp = TMP_ALLOC_LIMBS (6 * n + 5);
  as1  = tmp; tmp += n + 1;
  asm1 = tmp; tmp += n + 1;
  as2  = tmp; tmp += n + 1;
  bs1  = tmp; tmp += n + 1;
  bsm1 = tmp; tmp += n;
  bs2  = tmp; tmp += n + 1;
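  /* 6n+5 limbs in all: five (n+1)-limb evaluation buffers plus n limbs for
     bsm1, which needs no high limb since |b0 - b1| always fits in n
     limbs.  */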
  a0_a2 = pp;

  /* Compute as1 and asm1.  */
  vm1_neg = mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0_a2) & 1;
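  /* mpn_toom_eval_dgr3_pm1 evaluates the degree-3 polynomial A at +1 and
     -1, using a0_a2 (aliased to pp, which is still free here) as scratch;
     asm1 receives |A(-1)| and the low bit of the return value is set when
     A(-1) is negative.  */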

  /* Compute as2.  */
#if HAVE_NATIVE_mpn_addlsh1_n
  cy  = mpn_addlsh1_n (as2, a2, a3, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_addlsh1_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_addlsh1_n (as2, a0, as2, n);
#else
  cy  = mpn_lshift (as2, a3, s, 1);
  cy += mpn_add_n (as2, a2, as2, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a0, as2, n);
#endif
  as2[n] = cy;
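  /* Both branches evaluate A(2) by a Horner scheme,
     as2 = a0 + 2*(a1 + 2*(a2 + 2*a3)), doubling the accumulated carry at
     each step to keep it aligned with the shifted limbs.  */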

  /* Compute bs1 and bsm1.  */
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
	}
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);

      if (mpn_cmp (b0, b1, n) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub_n (bsm1, b0, b1, n);
	}
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);

      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, t);
	  MPN_ZERO (bsm1 + t, n - t);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub (bsm1, b0, n, b1, t);
	}
    }
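  /* In all cases bsm1 = |b0 - b1|; the sign of B(-1) is folded into
     vm1_neg, which thus ends up holding the sign of the full product
     vm1 = A(-1)*B(-1).  */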

  /* Compute bs2, recycling bs1: bs2 = bs1 + b1 = b0 + 2*b1 = B(2).  */
  mpn_add (bs2, bs1, n + 1, b1, t);
  ASSERT (as1[n] <= 3);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 1);
  /*ASSERT (bsm1[n] == 0);*/
  ASSERT (as2[n] <= 14);
  ASSERT (bs2[n] <= 2);
#define v0    pp				/* 2n */
#define v1    (pp + 2 * n)			/* 2n+1 */
#define vinf  (pp + 4 * n)			/* s+t */
#define vm1   scratch				/* 2n+1 */
#define v2    (scratch + 2 * n + 1)		/* 2n+2 */
#define scratch_out  scratch + 4 * n + 4	/* Currently unused. */
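  /* Note the overlap: v1[2n] is vinf[0], so vinf must be formed (and its
     low limb saved in vinf0) before v1's top limb is written.  */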

  /* vm1, 2n+1 limbs */
  TOOM42_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
  cy = 0;
  if (asm1[n] != 0)
    cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
  vm1[2 * n] = cy;
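  /* asm1[n] is 0 or 1 and bsm1 has no high limb, so the only cross term
     missing from the n x n low product is asm1[n] * bsm1, added at offset
     n above.  */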

  TOOM42_MUL_N_REC (v2, as2, bs2, n + 1, scratch_out);	/* v2, 2n+1 limbs */

  /* vinf, s+t limbs */
  if (s > t)  mpn_mul (vinf, a3, s, b1, t);
  else        mpn_mul (vinf, b1, t, a3, s);
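  /* mpn_mul requires its first source operand to be at least as long as
     its second, hence the operand swap when t > s.  */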

  vinf0 = vinf[0];				/* v1 overlaps with this */

  /* v1, 2n+1 limbs */
  TOOM42_MUL_N_REC (v1, as1, bs1, n, scratch_out);
  if (as1[n] == 1)
    {
      cy = mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n_ip1
      cy = mpn_addlsh1_n_ip1 (v1 + n, bs1, n);
#else
      cy = mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] == 3)
    {
      cy = mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(3));
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += as1[n] + mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;
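  /* The fixups above supply the cross terms of the (n+1)-limb product
     (as1 + as1[n]*W) * (bs1 + bs1[n]*W), W = 2^(n*GMP_NUMB_BITS):
     as1[n]*bs1 and bs1[n]*as1 at offset n, and as1[n]*bs1[n] at offset 2n
     (bs1[n] <= 1, so that term is just as1[n]).  */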

  TOOM42_MUL_N_REC (v0, ap, bp, n, scratch_out);	/* v0, 2n limbs */

  mpn_toom_interpolate_5pts (pp, v2, vm1, n, s + t, vm1_neg, vinf0);
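  /* Combines v0, v1, vinf (already in pp) with vm1 and v2 from scratch,
     writing the full an + bn = 4n + s + t limb product into pp.  */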

  TMP_FREE;
}