/src/gmp/mpn/mu_divappr_q.c
Line | Count | Source |
1 | | /* mpn_mu_divappr_q, mpn_preinv_mu_divappr_q. |
2 | | |
3 | | Compute Q = floor(N / D) + e. N is nn limbs, D is dn limbs and must be |
4 | | normalized, and Q must be nn-dn limbs, 0 <= e <= 4. The requirement that Q |
5 | | is nn-dn limbs (and not nn-dn+1 limbs) was put in place so that N can be
6 | | left unmodified during the operation.
7 | | |
8 | | Contributed to the GNU project by Torbjorn Granlund. |
9 | | |
10 | | THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES. IT IS ONLY |
11 | | SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES. IN FACT, IT IS ALMOST |
12 | | GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE. |
13 | | |
14 | | Copyright 2005-2007, 2009, 2010 Free Software Foundation, Inc. |
15 | | |
16 | | This file is part of the GNU MP Library. |
17 | | |
18 | | The GNU MP Library is free software; you can redistribute it and/or modify |
19 | | it under the terms of either: |
20 | | |
21 | | * the GNU Lesser General Public License as published by the Free |
22 | | Software Foundation; either version 3 of the License, or (at your |
23 | | option) any later version. |
24 | | |
25 | | or |
26 | | |
27 | | * the GNU General Public License as published by the Free Software |
28 | | Foundation; either version 2 of the License, or (at your option) any |
29 | | later version. |
30 | | |
31 | | or both in parallel, as here. |
32 | | |
33 | | The GNU MP Library is distributed in the hope that it will be useful, but |
34 | | WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
35 | | or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
36 | | for more details. |
37 | | |
38 | | You should have received copies of the GNU General Public License and the |
39 | | GNU Lesser General Public License along with the GNU MP Library. If not, |
40 | | see https://www.gnu.org/licenses/. */ |
41 | | |
42 | | |
43 | | /* |
44 | | The idea of the algorithm used herein is to compute a smaller inverted value |
45 | | than used in the standard Barrett algorithm, and thus save time in the |
46 | | Newton iterations, paying just a small price when using the inverted value
47 | | for developing quotient bits. This algorithm was presented at ICMS 2006. |
48 | | */ |
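/* Editor's sketch (not part of the GMP source): a single-word analogue of
   the Barrett idea described above, with illustrative names only.  The
   quotient is approximated by multiplying with a precomputed truncated
   inverse instead of dividing, and a short correction loop then fixes the
   slightly-too-small estimate, mirroring the adjustment steps later in
   this file. */
#include <stdint.h>

static uint32_t
barrett_div_approx_1 (uint32_t n, uint32_t d)   /* requires d >= 2 */
{
  /* v = floor(2^32 / d) plays the role of the inverted value I that
     mpn_invertappr computes below. */
  uint32_t v = (uint32_t) (((uint64_t) 1 << 32) / d);

  /* Approximate quotient: the high word of n * v.  It is never too
     large, and at most a few units too small. */
  uint32_t q = (uint32_t) (((uint64_t) n * v) >> 32);

  /* Correction loop, cf. the "Check the remainder R and adjust the
     quotient" loop in mpn_preinv_mu_divappr_q below. */
  while (n - (uint64_t) q * d >= d)
    q++;
  return q;
}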
49 | | |
50 | | /* CAUTION: This code and the code in mu_div_qr.c should be edited in sync. |
51 | | |
52 | | Things to work on: |
53 | | |
54 | | * The itch/scratch scheme perhaps isn't as good an idea as it once seemed,
55 | | as demonstrated by the fact that the mpn_invertappr function's scratch
56 | | needs force us to keep a large allocation long after it is needed.
57 | | Things are made worse by mpn_mul_fft not accepting a scratch parameter,
58 | | which means we'll have a large memory hole while in mpn_mul_fft. In
59 | | general, a peak scratch need at the beginning of a function isn't
60 | | well-handled by the itch/scratch scheme.
61 | | */ |
62 | | |
63 | | #ifdef STAT |
64 | | #undef STAT |
65 | | #define STAT(x) x |
66 | | #else |
67 | | #define STAT(x) |
68 | | #endif |
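/* Editor's note: compiling this file with STAT defined (e.g. -DSTAT)
   makes STAT(x) expand to its argument, enabling the quotient-adjustment
   statistics collected and printed in mpn_preinv_mu_divappr_q below; by
   default every STAT(...) expands to nothing. */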
69 | | |
70 | | #include <stdlib.h> /* for NULL */ |
71 | | #include "gmp-impl.h" |
72 | | |
73 | | static mp_limb_t mpn_preinv_mu_divappr_q (mp_ptr, mp_srcptr, mp_size_t, |
74 | | mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr); |
75 | | static mp_size_t mpn_mu_divappr_q_choose_in (mp_size_t, mp_size_t, int); |
76 | | |
77 | | mp_limb_t |
78 | | mpn_mu_divappr_q (mp_ptr qp, |
79 | | mp_srcptr np, |
80 | | mp_size_t nn, |
81 | | mp_srcptr dp, |
82 | | mp_size_t dn, |
83 | | mp_ptr scratch) |
84 | 0 | { |
85 | 0 | mp_size_t qn, in; |
86 | 0 | mp_limb_t cy, qh; |
87 | 0 | mp_ptr ip, tp; |
88 | |
89 | 0 | ASSERT (dn > 1); |
90 | |
91 | 0 | qn = nn - dn; |
92 | | |
93 | | /* If Q is smaller than D, truncate operands. */ |
94 | 0 | if (qn + 1 < dn) |
95 | 0 | { |
96 | 0 | np += dn - (qn + 1); |
97 | 0 | nn -= dn - (qn + 1); |
98 | 0 | dp += dn - (qn + 1); |
99 | 0 | dn = qn + 1; |
100 | 0 | } |
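/* Editor's example of the truncation above: nn = 12, dn = 10 gives
   qn = 2; since qn + 1 = 3 < 10, the low dn - (qn + 1) = 7 limbs of both
   N and D are dropped and the division proceeds with nn = 5, dn = 3.
   Only an approximate quotient is required, and the 0 <= e <= 4 slack in
   the contract is what absorbs the effect of the discarded low limbs. */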
101 | | |
102 | | /* Compute the inverse size. */ |
103 | 0 | in = mpn_mu_divappr_q_choose_in (qn, dn, 0); |
104 | 0 | ASSERT (in <= dn); |
105 | |
106 | 0 | #if 1 |
107 | | /* This alternative inverse computation method gets slightly more accurate |
108 | | results. FIXMEs: (1) temp allocation needs not analysed; (2) itch function
109 | | not adapted; (3) mpn_invertappr scratch needs not met. */
110 | 0 | ip = scratch; |
111 | 0 | tp = scratch + in + 1; |
112 | | |
113 | | /* Compute an approximate inverse on (in+1) limbs. */
114 | 0 | if (dn == in) |
115 | 0 | { |
116 | 0 | MPN_COPY (tp + 1, dp, in); |
117 | 0 | tp[0] = 1; |
118 | 0 | mpn_invertappr (ip, tp, in + 1, tp + in + 1); |
119 | 0 | MPN_COPY_INCR (ip, ip + 1, in); |
120 | 0 | } |
121 | 0 | else |
122 | 0 | { |
123 | 0 | cy = mpn_add_1 (tp, dp + dn - (in + 1), in + 1, 1); |
124 | 0 | if (UNLIKELY (cy != 0)) |
125 | 0 | MPN_ZERO (ip, in); |
126 | 0 | else |
127 | 0 | { |
128 | 0 | mpn_invertappr (ip, tp, in + 1, tp + in + 1); |
129 | 0 | MPN_COPY_INCR (ip, ip + 1, in); |
130 | 0 | } |
131 | 0 | } |
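/* Editor's note on the inverse computation above: both branches run
   mpn_invertappr on an (in+1)-limb slice of D and then drop the low
   limb of the result (MPN_COPY_INCR), leaving the in-limb approximate
   inverse I with an implicit most significant limb.  When dn == in,
   D is extended downward with a low limb of 1; otherwise the high
   in+1 limbs of D are rounded up by mpn_add_1.  Either way the
   inverted operand is taken slightly larger than (a slice of) D, so
   the inverse errs on the low side.  A carry out of the round-up
   means the high limbs of D are all ones, and the stored part of the
   inverse of B^(in+1) is simply zero (MPN_ZERO). */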
132 | | #else |
133 | | /* This older inverse computation method gets slightly worse results than the |
134 | | one above. */ |
135 | | ip = scratch; |
136 | | tp = scratch + in; |
137 | | |
138 | | /* Compute inverse of D to in+1 limbs, then round to 'in' limbs. Ideally the |
139 | | inversion function should do this automatically. */ |
140 | | if (dn == in) |
141 | | { |
142 | | tp[in + 1] = 0; |
143 | | MPN_COPY (tp + in + 2, dp, in); |
144 | | mpn_invertappr (tp, tp + in + 1, in + 1, NULL); |
145 | | } |
146 | | else |
147 | | { |
148 | | mpn_invertappr (tp, dp + dn - (in + 1), in + 1, NULL); |
149 | | } |
150 | | cy = mpn_sub_1 (tp, tp, in + 1, GMP_NUMB_HIGHBIT); |
151 | | if (UNLIKELY (cy != 0)) |
152 | | MPN_ZERO (tp + 1, in); |
153 | | MPN_COPY (ip, tp + 1, in); |
154 | | #endif |
155 | |
156 | 0 | qh = mpn_preinv_mu_divappr_q (qp, np, nn, dp, dn, ip, in, scratch + in); |
157 | |
158 | 0 | return qh; |
159 | 0 | } |
160 | | |
161 | | static mp_limb_t |
162 | | mpn_preinv_mu_divappr_q (mp_ptr qp, |
163 | | mp_srcptr np, |
164 | | mp_size_t nn, |
165 | | mp_srcptr dp, |
166 | | mp_size_t dn, |
167 | | mp_srcptr ip, |
168 | | mp_size_t in, |
169 | | mp_ptr scratch) |
170 | 0 | { |
171 | 0 | mp_size_t qn; |
172 | 0 | mp_limb_t cy, cx, qh; |
173 | 0 | mp_limb_t r; |
174 | 0 | mp_size_t tn, wn; |
175 | |
176 | 0 | #define rp scratch |
177 | 0 | #define tp (scratch + dn) |
178 | 0 | #define scratch_out (scratch + dn + tn) |
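/* Scratch layout in this function (editor's note):

     | rp: dn limbs | tp: tn limbs | scratch_out: mpn_mulmod_bnm1 itch |

   rp holds the dn-limb partial remainder; tp receives the product of
   the divisor and the current quotient block (dn + in limbs when
   computed with plain mpn_mul, or tn = mpn_mulmod_bnm1_next_size (dn + 1)
   limbs when computed mod B^tn - 1). */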
179 | |
180 | 0 | qn = nn - dn; |
181 | |
182 | 0 | np += qn; |
183 | 0 | qp += qn; |
184 | |
185 | 0 | qh = mpn_cmp (np, dp, dn) >= 0; |
186 | 0 | if (qh != 0) |
187 | 0 | mpn_sub_n (rp, np, dp, dn); |
188 | 0 | else |
189 | 0 | MPN_COPY (rp, np, dn); |
190 | |
191 | 0 | if (UNLIKELY (qn == 0)) |
192 | 0 | return qh; /* Degenerate use. Should we allow this? */ |
193 | | |
194 | 0 | for (;;) /* The exit condition (qn == 0) is verified in the loop. */ |
195 | 0 | { |
196 | 0 | if (qn < in) |
197 | 0 | { |
198 | 0 | ip += in - qn; |
199 | 0 | in = qn; |
200 | 0 | } |
201 | 0 | np -= in; |
202 | 0 | qp -= in; |
203 | | |
204 | | /* Compute the next block of quotient limbs by multiplying the inverse I |
205 | | by the upper part of the partial remainder R. */ |
206 | 0 | mpn_mul_n (tp, rp + dn - in, ip, in); /* mulhi */ |
207 | 0 | cy = mpn_add_n (qp, tp + in, rp + dn - in, in); /* I's msb implicit */ |
208 | 0 | ASSERT_ALWAYS (cy == 0); |
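/* Editor's note on the two lines above: with B = 2^GMP_NUMB_BITS, the
   true inverse is B^in + I, but only I is stored ("I's msb implicit").
   The new quotient block is therefore

     Q_block = high in limbs of (R_high * I) + R_high
             ~ (R_high * (B^in + I)) / B^in,

   the classic Barrett multiply-by-inverse step; the assertion states
   that adding the implicit-msb term R_high can never carry out. */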
209 | | |
210 | 0 | qn -= in; |
211 | 0 | if (qn == 0) |
212 | 0 | break; |
213 | | |
214 | | /* Compute the product of the quotient block and the divisor D, to be |
215 | | subtracted from the partial remainder combined with new limbs from the |
216 | | dividend N. We only really need the low dn limbs. */ |
217 | | |
218 | 0 | if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD)) |
219 | 0 | mpn_mul (tp, dp, dn, qp, in); /* dn+in limbs, high 'in' cancels */ |
220 | 0 | else |
221 | 0 | { |
222 | 0 | tn = mpn_mulmod_bnm1_next_size (dn + 1); |
223 | 0 | mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, in, scratch_out); |
224 | 0 | wn = dn + in - tn; /* number of wrapped limbs */ |
225 | 0 | if (wn > 0) |
226 | 0 | { |
227 | 0 | cy = mpn_sub_n (tp, tp, rp + dn - wn, wn); |
228 | 0 | cy = mpn_sub_1 (tp + wn, tp + wn, tn - wn, cy); |
229 | 0 | cx = mpn_cmp (rp + dn - in, tp + dn, tn - dn) < 0; |
230 | 0 | ASSERT_ALWAYS (cx >= cy); |
231 | 0 | mpn_incr_u (tp, cx - cy); |
232 | 0 | } |
233 | 0 | } |
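/* Editor's note on the wraparound fix above: the true product D * Q_block
   has dn + in limbs, but mpn_mulmod_bnm1 delivers it only mod B^tn - 1,
   so its top wn limbs were added into the low limbs of tp.  Q_block was
   chosen so that the product's high limbs nearly cancel against the high
   limbs of R; hence those wrapped limbs agree with the top of R up to a
   possible off-by-one, which the cx/cy comparison detects, and the
   mpn_sub_n/mpn_sub_1/mpn_incr_u sequence subtracts them back out. */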
234 | | |
235 | 0 | r = rp[dn - in] - tp[dn]; |
236 | | |
237 | | /* Subtract the product from the partial remainder combined with new |
238 | | limbs from the dividend N, generating a new partial remainder R. */ |
239 | 0 | if (dn != in) |
240 | 0 | { |
241 | 0 | cy = mpn_sub_n (tp, np, tp, in); /* get next 'in' limbs from N */ |
242 | 0 | cy = mpn_sub_nc (tp + in, rp, tp + in, dn - in, cy); |
243 | 0 | MPN_COPY (rp, tp, dn); /* FIXME: try to avoid this */ |
244 | 0 | } |
245 | 0 | else |
246 | 0 | { |
247 | 0 | cy = mpn_sub_n (rp, np, tp, in); /* get next 'in' limbs from N */ |
248 | 0 | } |
249 | |
250 | 0 | STAT (int i; int err = 0; |
251 | 0 | static int errarr[5]; static int err_rec; static int tot); |
252 | | |
253 | | /* Check the remainder R and adjust the quotient as needed. */ |
254 | 0 | r -= cy; |
255 | 0 | while (r != 0) |
256 | 0 | { |
257 | | /* We loop 0 times with about 69% probability, 1 time with about 31% |
258 | | probability, 2 times with about 0.6% probability, if the inverse
259 | | is computed as recommended. */
260 | 0 | mpn_incr_u (qp, 1); |
261 | 0 | cy = mpn_sub_n (rp, rp, dp, dn); |
262 | 0 | r -= cy; |
263 | 0 | STAT (err++); |
264 | 0 | } |
265 | 0 | if (mpn_cmp (rp, dp, dn) >= 0) |
266 | 0 | { |
267 | | /* This is executed with about 76% probability. */ |
268 | 0 | mpn_incr_u (qp, 1); |
269 | 0 | cy = mpn_sub_n (rp, rp, dp, dn); |
270 | 0 | STAT (err++); |
271 | 0 | } |
272 | |
273 | 0 | STAT ( |
274 | 0 | tot++; |
275 | 0 | errarr[err]++; |
276 | 0 | if (err > err_rec) |
277 | 0 | err_rec = err; |
278 | 0 | if (tot % 0x10000 == 0) |
279 | 0 | { |
280 | 0 | for (i = 0; i <= err_rec; i++) |
281 | 0 | printf (" %d(%.1f%%)", errarr[i], 100.0*errarr[i]/tot); |
282 | 0 | printf ("\n"); |
283 | 0 | } |
284 | 0 | ); |
285 | 0 | } |
286 | | |
287 | | /* FIXME: We should perhaps be somewhat more elegant in our rounding of the |
288 | | quotient. For now, just make sure the returned quotient is >= the real |
289 | | quotient; add 3 with saturating arithmetic. */ |
290 | 0 | qn = nn - dn; |
291 | 0 | cy += mpn_add_1 (qp, qp, qn, 3); |
292 | 0 | if (cy != 0) |
293 | 0 | { |
294 | 0 | if (qh != 0) |
295 | 0 | { |
296 | | /* Return a quotient of just 1-bits, with qh set. */ |
297 | 0 | mp_size_t i; |
298 | 0 | for (i = 0; i < qn; i++) |
299 | 0 | qp[i] = GMP_NUMB_MAX; |
300 | 0 | } |
301 | 0 | else |
302 | 0 | { |
303 | | /* Propagate carry into qh. */ |
304 | 0 | qh = 1; |
305 | 0 | } |
306 | 0 | } |
307 | |
308 | 0 | return qh; |
309 | 0 | } |
310 | | |
311 | | /* In case k=0 (automatic choice), we distinguish 3 cases: |
312 | | (a) dn < qn: in = ceil(qn / ceil(qn/dn)) |
313 | | (b) dn/3 < qn <= dn: in = ceil(qn / 2) |
314 | | (c) qn <= dn/3: in = qn
315 | | In all cases we have in <= dn. |
316 | | */ |
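/* Editor's example with dn = 100, k = 0:
     qn = 250 (case a): b = ceil(250/100) = 3, in = ceil(250/3) = 84,
                        i.e. three quotient blocks of at most 84 limbs;
     qn =  80 (case b): in = ceil(80/2) = 40;
     qn =  20 (case c): in = 20. */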
317 | | static mp_size_t |
318 | | mpn_mu_divappr_q_choose_in (mp_size_t qn, mp_size_t dn, int k) |
319 | 0 | { |
320 | 0 | mp_size_t in; |
321 | |
322 | 0 | if (k == 0) |
323 | 0 | { |
324 | 0 | mp_size_t b; |
325 | 0 | if (qn > dn) |
326 | 0 | { |
327 | | /* Compute an inverse size that is a nice partition of the quotient. */ |
328 | 0 | b = (qn - 1) / dn + 1; /* ceil(qn/dn), number of blocks */ |
329 | 0 | in = (qn - 1) / b + 1; /* ceil(qn/b) = ceil(qn / ceil(qn/dn)) */ |
330 | 0 | } |
331 | 0 | else if (3 * qn > dn) |
332 | 0 | { |
333 | 0 | in = (qn - 1) / 2 + 1; /* b = 2 */ |
334 | 0 | } |
335 | 0 | else |
336 | 0 | { |
337 | 0 | in = (qn - 1) / 1 + 1; /* b = 1 */ |
338 | 0 | } |
339 | 0 | } |
340 | 0 | else |
341 | 0 | { |
342 | 0 | mp_size_t xn; |
343 | 0 | xn = MIN (dn, qn); |
344 | 0 | in = (xn - 1) / k + 1; |
345 | 0 | } |
346 | |
347 | 0 | return in; |
348 | 0 | } |
349 | | |
350 | | mp_size_t |
351 | | mpn_mu_divappr_q_itch (mp_size_t nn, mp_size_t dn, int mua_k) |
352 | 0 | { |
353 | 0 | mp_size_t qn, in, itch_local, itch_out, itch_invapp; |
354 | |
355 | 0 | qn = nn - dn; |
356 | 0 | if (qn + 1 < dn) |
357 | 0 | { |
358 | 0 | dn = qn + 1; |
359 | 0 | } |
360 | 0 | in = mpn_mu_divappr_q_choose_in (qn, dn, mua_k); |
361 | |
362 | 0 | itch_local = mpn_mulmod_bnm1_next_size (dn + 1); |
363 | 0 | itch_out = mpn_mulmod_bnm1_itch (itch_local, dn, in); |
364 | 0 | itch_invapp = mpn_invertappr_itch (in + 1) + in + 2; /* 3in + 4 */ |
365 | |
366 | 0 | ASSERT (dn + itch_local + itch_out >= itch_invapp); |
367 | 0 | return in + MAX (dn + itch_local + itch_out, itch_invapp); |
368 | 0 | } |
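/* Editor's sketch of a typical internal caller, assuming GMP's TMP
   allocation macros from gmp-impl.h; illustrative only:

     mp_limb_t qh;
     mp_ptr scratch;
     TMP_DECL;

     TMP_MARK;
     scratch = TMP_ALLOC_LIMBS (mpn_mu_divappr_q_itch (nn, dn, 0));
     qh = mpn_mu_divappr_q (qp, np, nn, dp, dn, scratch);
     TMP_FREE;

   Here qp receives the nn - dn low limbs of the approximate quotient and
   qh its high limb, with Q = floor(N/D) + e, 0 <= e <= 4 as stated at the
   top of the file; mua_k = 0 selects the automatic block-size choice. */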