Coverage Report

Created: 2023-02-22 06:14

/src/gmp-6.2.1/mpn/toom22_mul.c
 Count | Source
       | /* mpn_toom22_mul -- Multiply {ap,an} and {bp,bn} where an >= bn.  Or more
       |    accurately, bn <= an < 2bn.
       |
       |    Contributed to the GNU project by Torbjorn Granlund.
       |
       |    THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
       |    SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
       |    GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
       |
       | Copyright 2006-2010, 2012, 2014, 2018 Free Software Foundation, Inc.
       |
       | This file is part of the GNU MP Library.
       |
       | The GNU MP Library is free software; you can redistribute it and/or modify
       | it under the terms of either:
       |
       |   * the GNU Lesser General Public License as published by the Free
       |     Software Foundation; either version 3 of the License, or (at your
       |     option) any later version.
       |
       | or
       |
       |   * the GNU General Public License as published by the Free Software
       |     Foundation; either version 2 of the License, or (at your option) any
       |     later version.
       |
       | or both in parallel, as here.
       |
       | The GNU MP Library is distributed in the hope that it will be useful, but
       | WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
       | or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       | for more details.
       |
       | You should have received copies of the GNU General Public License and the
       | GNU Lesser General Public License along with the GNU MP Library.  If not,
       | see https://www.gnu.org/licenses/.  */
       |
       |
       | #include "gmp-impl.h"
       |
       | /* Evaluate in: -1, 0, +inf
       |
       |   <-s--><--n-->
       |    ____ ______
       |   |_a1_|___a0_|
       |    |b1_|___b0_|
       |    <-t-><--n-->
       |
       |   v0  =  a0     * b0       #   A(0)*B(0)
       |   vm1 = (a0- a1)*(b0- b1)  #  A(-1)*B(-1)
       |   vinf=      a1 *     b1   # A(inf)*B(inf)
       | */
       |
       | #if TUNE_PROGRAM_BUILD || WANT_FAT_BINARY
       | #define MAYBE_mul_toom22   1
       | #else
       | #define MAYBE_mul_toom22                                \
  129k |   (MUL_TOOM33_THRESHOLD >= 2 * MUL_TOOM22_THRESHOLD)
       | #endif
       |
       | #define TOOM22_MUL_N_REC(p, a, b, n, ws)                \
 62.3k |   do {                                                  \
 62.3k |     if (! MAYBE_mul_toom22                              \
 62.3k |         || BELOW_THRESHOLD (n, MUL_TOOM22_THRESHOLD))   \
 62.3k |       mpn_mul_basecase (p, a, n, b, n);                 \
 62.3k |     else                                                \
 62.3k |       mpn_toom22_mul (p, a, n, b, n, ws);               \
 62.3k |   } while (0)
       |
       | /* Normally, this calls mul_basecase or toom22_mul.  But when the fraction
       |    MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD is large, an initially small
       |    relative unbalance will become a larger and larger relative unbalance with
       |    each recursion (the difference s-t will be invariant over recursive calls).
       |    Therefore, we need to call toom32_mul.  FIXME: Suppress depending on
       |    MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD and on MUL_TOOM22_THRESHOLD.  */
       | #define TOOM22_MUL_REC(p, a, an, b, bn, ws)             \
 2.27k |   do {                                                  \
 2.27k |     if (! MAYBE_mul_toom22                              \
 2.27k |         || BELOW_THRESHOLD (bn, MUL_TOOM22_THRESHOLD))  \
 2.27k |       mpn_mul_basecase (p, a, an, b, bn);               \
 2.27k |     else if (4 * an < 5 * bn)                           \
    35 |       mpn_toom22_mul (p, a, an, b, bn, ws);             \
    35 |     else                                                \
    35 |       mpn_toom32_mul (p, a, an, b, bn, ws);             \
 2.27k |   } while (0)
       |
       | void
       | mpn_toom22_mul (mp_ptr pp,
       |                 mp_srcptr ap, mp_size_t an,
       |                 mp_srcptr bp, mp_size_t bn,
       |                 mp_ptr scratch)
 21.5k | {
 21.5k |   const int __gmpn_cpuvec_initialized = 1;
 21.5k |   mp_size_t n, s, t;
 21.5k |   int vm1_neg;
 21.5k |   mp_limb_t cy, cy2;
 21.5k |   mp_ptr asm1;
 21.5k |   mp_ptr bsm1;
       |
 54.0k | #define a0  ap
 33.4k | #define a1  (ap + n)
 43.2k | #define b0  bp
 32.0k | #define b1  (bp + n)
       |
 21.5k |   s = an >> 1;
 21.5k |   n = an - s;
 21.5k |   t = bn - n;
       |
 21.5k |   ASSERT (an >= bn);
       |
 21.5k |   ASSERT (0 < s && s <= n && s >= n - 1);
 21.5k |   ASSERT (0 < t && t <= s);
       |
 21.5k |   asm1 = pp;
 21.5k |   bsm1 = pp + n;
       |
 21.5k |   vm1_neg = 0;
       |
       |   /* Compute asm1.  */
 21.5k |   if (s == n)
 11.1k |     {
 11.1k |       if (mpn_cmp (a0, a1, n) < 0)
 3.29k |         {
 3.29k |           mpn_sub_n (asm1, a1, a0, n);
 3.29k |           vm1_neg = 1;
 3.29k |         }
 7.86k |       else
 7.86k |         {
 7.86k |           mpn_sub_n (asm1, a0, a1, n);
 7.86k |         }
 11.1k |     }
 10.3k |   else /* n - s == 1 */
 10.3k |     {
 10.3k |       if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
   169 |         {
   169 |           mpn_sub_n (asm1, a1, a0, s);
   169 |           asm1[s] = 0;
   169 |           vm1_neg = 1;
   169 |         }
 10.2k |       else
 10.2k |         {
 10.2k |           asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
 10.2k |         }
 10.3k |     }
       |
       |   /* Compute bsm1.  */
 21.5k |   if (t == n)
 10.3k |     {
 10.3k |       if (mpn_cmp (b0, b1, n) < 0)
 3.78k |         {
 3.78k |           mpn_sub_n (bsm1, b1, b0, n);
 3.78k |           vm1_neg ^= 1;
 3.78k |         }
 6.58k |       else
 6.58k |         {
 6.58k |           mpn_sub_n (bsm1, b0, b1, n);
 6.58k |         }
 10.3k |     }
 11.1k |   else
 11.1k |     {
 11.1k |       if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
     0 |         {
     0 |           mpn_sub_n (bsm1, b1, b0, t);
     0 |           MPN_ZERO (bsm1 + t, n - t);
     0 |           vm1_neg ^= 1;
     0 |         }
 11.1k |       else
 11.1k |         {
 11.1k |           mpn_sub (bsm1, b0, n, b1, t);
 11.1k |         }
 11.1k |     }
       |
 43.0k | #define v0   pp               /* 2n */
 43.0k | #define vinf (pp + 2 * n)     /* s+t */
 21.5k | #define vm1  scratch          /* 2n */
 21.5k | #define scratch_out  scratch + 2 * n
       |
       |   /* vm1, 2n limbs */
 21.5k |   TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
       |
 21.5k |   if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
 19.2k |   else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);
       |
       |   /* v0, 2n limbs */
 21.5k |   TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);
       |
       |   /* H(v0) + L(vinf) */
 21.5k |   cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);
       |
       |   /* L(v0) + H(v0) */
 21.5k |   cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);
       |
       |   /* L(vinf) + H(vinf) */
 21.5k |   cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);
       |
 21.5k |   if (vm1_neg)
 3.73k |     cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
 17.7k |   else {
 17.7k |     cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);
 17.7k |     if (UNLIKELY (cy + 1 == 0)) { /* cy is negative */
       |       /* The total contribution of v0+vinf-vm1 can not be negative. */
       | #if WANT_ASSERT
       |       /* The borrow in cy stops the propagation of the carry cy2, */
       |       ASSERT (cy2 == 1);
       |       cy += mpn_add_1 (pp + 2 * n, pp + 2 * n, n, cy2);
       |       ASSERT (cy == 0);
       | #else
       |       /* we simply fill the area with zeros. */
     0 |       MPN_FILL (pp + 2 * n, n, 0);
     0 | #endif
     0 |       return;
     0 |     }
 17.7k |   }
       |
 21.5k |   ASSERT (cy  <= 2);
 21.5k |   ASSERT (cy2 <= 2);
       |
 21.5k |   MPN_INCR_U (pp + 2 * n, s + t, cy2);
       |   /* if s+t==n, cy is zero, but we should not access pp[3*n] at all. */
 21.5k |   MPN_INCR_U (pp + 3 * n, s + t - n, cy);
 21.5k | }
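
The only uncovered code in this run is the two count-0 branches above: the bsm1 path taken when the high n-t limbs of b0 are zero and b0 compares below b1 on the low t limbs, and the MPN_FILL recovery path (compiled only when WANT_ASSERT is off) that zero-fills the high area when cy borrows.

For reference, the three evaluation points from the header comment recombine as

  a * b = vinf*x^2 + (v0 + vinf - vm1)*x + v0,   x = 2^(n*GMP_NUMB_BITS),

with vm1 taken with its sign, which is exactly what the H(v0)+L(vinf), L(v0)+H(v0), and L(vinf)+H(vinf) additions followed by the final conditional add/subtract of vm1 implement. Below is a minimal standalone sketch of the same identity, not GMP code: it splits one 64-bit word into 32-bit halves (so x = 2^32) and assumes a compiler providing unsigned __int128, such as GCC or Clang; all names are illustrative.

/* Sketch of the toom22 three-point scheme on a single 64-bit word. */
#include <stdint.h>
#include <stdio.h>

static unsigned __int128
toom22_demo (uint64_t a, uint64_t b)
{
  uint32_t a0 = (uint32_t) a, a1 = (uint32_t) (a >> 32);
  uint32_t b0 = (uint32_t) b, b1 = (uint32_t) (b >> 32);

  uint64_t v0   = (uint64_t) a0 * b0;   /*   A(0)*B(0)   */
  uint64_t vinf = (uint64_t) a1 * b1;   /* A(inf)*B(inf) */

  /* As in the code above: take absolute differences and track the
     sign of (a0-a1)*(b0-b1) separately in vm1_neg.  */
  int vm1_neg = (a0 < a1) ^ (b0 < b1);
  uint64_t vm1 = (uint64_t) (a0 < a1 ? a1 - a0 : a0 - a1)
               * (uint64_t) (b0 < b1 ? b1 - b0 : b0 - b1);

  /* Middle coefficient a0*b1 + a1*b0 = v0 + vinf -/+ vm1; it is never
     negative, which is what the cy underflow check above guards.  */
  unsigned __int128 mid = (unsigned __int128) v0 + vinf;
  mid = vm1_neg ? mid + vm1 : mid - vm1;

  return ((unsigned __int128) vinf << 64) + (mid << 32) + v0;
}

int
main (void)
{
  uint64_t a = 0x123456789abcdef0u, b = 0xfedcba9876543210u;
  unsigned __int128 ref = (unsigned __int128) a * b;
  printf ("toom22_demo %s the schoolbook product\n",
          toom22_demo (a, b) == ref ? "matches" : "differs from");
  return 0;
}

Mirroring the limb code, the sketch stores |a0 - a1| and |b0 - b1| and keeps the sign of their product in vm1_neg, which is what lets mpn_toom22_mul work entirely with unsigned subtraction plus one conditional add or subtract of vm1.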