Coverage Report

Created: 2023-02-22 06:39

/src/gmp-6.2.1/mpn/toom22_mul.c
Line   Count  Source
   1          /* mpn_toom22_mul -- Multiply {ap,an} and {bp,bn} where an >= bn.  Or more
   2             accurately, bn <= an < 2bn.
   3
   4             Contributed to the GNU project by Torbjorn Granlund.
   5
   6             THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   7             SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   8             GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
   9
  10          Copyright 2006-2010, 2012, 2014, 2018 Free Software Foundation, Inc.
  11
  12          This file is part of the GNU MP Library.
  13
  14          The GNU MP Library is free software; you can redistribute it and/or modify
  15          it under the terms of either:
  16
  17            * the GNU Lesser General Public License as published by the Free
  18              Software Foundation; either version 3 of the License, or (at your
  19              option) any later version.
  20
  21          or
  22
  23            * the GNU General Public License as published by the Free Software
  24              Foundation; either version 2 of the License, or (at your option) any
  25              later version.
  26
  27          or both in parallel, as here.
  28
  29          The GNU MP Library is distributed in the hope that it will be useful, but
  30          WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  31          or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  32          for more details.
  33
  34          You should have received copies of the GNU General Public License and the
  35          GNU Lesser General Public License along with the GNU MP Library.  If not,
  36          see https://www.gnu.org/licenses/.  */
  37
  38
  39          #include "gmp-impl.h"
  40
  41          /* Evaluate in: -1, 0, +inf
  42
  43            <-s--><--n-->
  44             ____ ______
  45            |_a1_|___a0_|
  46             |b1_|___b0_|
  47             <-t-><--n-->
  48
  49            v0  =  a0     * b0       #   A(0)*B(0)
  50            vm1 = (a0- a1)*(b0- b1)  #  A(-1)*B(-1)
  51            vinf=      a1 *     b1   # A(inf)*B(inf)
  52          */
  53
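The three half-size products above recombine by pure interpolation: writing x = 2^(n*GMP_NUMB_BITS), A = a1*x + a0 and B = b1*x + b0, the middle coefficient a0*b1 + a1*b0 equals v0 + vinf - vm1, so A*B = vinf*x^2 + (v0 + vinf - vm1)*x + v0 costs three multiplications instead of four. A standalone sketch of that identity (not part of this file), with 16-bit halves of 32-bit words standing in for the n-limb blocks:

  /* Karatsuba identity on single words: A = a1*2^16 + a0, B = b1*2^16 + b0.
     vm1 = A(-1)*B(-1) may be negative; mpn_toom22_mul stores its absolute
     value and tracks the sign separately (vm1_neg below).  */
  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t a = 0x12345678, b = 0x9abcdef0;
    uint64_t a0 = a & 0xffff, a1 = a >> 16;
    uint64_t b0 = b & 0xffff, b1 = b >> 16;

    uint64_t v0   = a0 * b0;                         /*  A(0)*B(0)    */
    int64_t  vm1  = ((int64_t) a0 - (int64_t) a1)
                  * ((int64_t) b0 - (int64_t) b1);   /*  A(-1)*B(-1)  */
    uint64_t vinf = a1 * b1;                         /* A(inf)*B(inf) */

    /* Middle coefficient a0*b1 + a1*b0 = v0 + vinf - vm1; the unsigned
       subtraction is exact modulo 2^64 even when vm1 < 0.  */
    uint64_t mid = v0 + vinf - (uint64_t) vm1;

    assert (v0 + (mid << 16) + (vinf << 32) == (uint64_t) a * b);
    return 0;
  }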
  54          #if TUNE_PROGRAM_BUILD || WANT_FAT_BINARY
  55          #define MAYBE_mul_toom22   1
  56          #else
  57          #define MAYBE_mul_toom22                                \
  58    123k    (MUL_TOOM33_THRESHOLD >= 2 * MUL_TOOM22_THRESHOLD)
  59          #endif
  60
  61          #define TOOM22_MUL_N_REC(p, a, b, n, ws)                \
  62   58.0k    do {                                                  \
  63   58.0k      if (! MAYBE_mul_toom22                              \
  64   58.0k          || BELOW_THRESHOLD (n, MUL_TOOM22_THRESHOLD))   \
  65   58.0k        mpn_mul_basecase (p, a, n, b, n);                 \
  66   58.0k      else                                                \
  67   58.0k        mpn_toom22_mul (p, a, n, b, n, ws);               \
  68   58.0k    } while (0)
  69
  70          /* Normally, this calls mul_basecase or toom22_mul.  But when the fraction
  71             MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD is large, an initially small
  72             relative unbalance will become a larger and larger relative unbalance with
  73             each recursion (the difference s-t will be invariant over recursive calls).
  74             Therefore, we need to call toom32_mul.  FIXME: Suppress depending on
  75             MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD and on MUL_TOOM22_THRESHOLD.  */
  76          #define TOOM22_MUL_REC(p, a, an, b, bn, ws)             \
  77   3.62k    do {                                                  \
  78   3.62k      if (! MAYBE_mul_toom22                              \
  79   3.62k          || BELOW_THRESHOLD (bn, MUL_TOOM22_THRESHOLD))  \
  80   3.62k        mpn_mul_basecase (p, a, an, b, bn);               \
  81   3.62k      else if (4 * an < 5 * bn)                           \
  82     241        mpn_toom22_mul (p, a, an, b, bn, ws);             \
  83     241      else                                                \
  84     241        mpn_toom32_mul (p, a, an, b, bn, ws);             \
  85   3.62k    } while (0)
  86
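The 4 * an < 5 * bn test keeps the recursion from drifting: as the comment notes, s - t is invariant over recursive calls, so while the sizes halve, a fixed absolute unbalance grows in relative terms, and once an/bn reaches 5/4 the pair is handed to mpn_toom32_mul instead. A quick standalone check of where that cutoff falls (illustrative, not GMP code):

  #include <stdio.h>

  int
  main (void)
  {
    /* Sample (an, bn) pairs around the an/bn = 5/4 cutoff.  */
    static const long sizes[][2] =
      { {100, 90}, {100, 81}, {100, 80}, {100, 60} };

    for (unsigned i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
      {
        long an = sizes[i][0], bn = sizes[i][1];
        printf ("an=%ld bn=%ld -> %s\n", an, bn,
                4 * an < 5 * bn ? "mpn_toom22_mul" : "mpn_toom32_mul");
      }
    return 0;
  }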
  87          void
  88          mpn_toom22_mul (mp_ptr pp,
  89                          mp_srcptr ap, mp_size_t an,
  90                          mp_srcptr bp, mp_size_t bn,
  91                          mp_ptr scratch)
  92   20.5k  {
  93   20.5k    const int __gmpn_cpuvec_initialized = 1;
  94   20.5k    mp_size_t n, s, t;
  95   20.5k    int vm1_neg;
  96   20.5k    mp_limb_t cy, cy2;
  97   20.5k    mp_ptr asm1;
  98   20.5k    mp_ptr bsm1;
  99
 100   52.2k  #define a0  ap
 101   31.2k  #define a1  (ap + n)
 102   41.4k  #define b0  bp
 103   29.2k  #define b1  (bp + n)
 104
 105   20.5k    s = an >> 1;
 106   20.5k    n = an - s;
 107   20.5k    t = bn - n;
 108
 109   20.5k    ASSERT (an >= bn);
 110
 111   20.5k    ASSERT (0 < s && s <= n && s >= n - 1);
 112   20.5k    ASSERT (0 < t && t <= s);
 113
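The split gives the low blocks a0 and b0 n = ceil(an/2) limbs each, the high block a1 s = floor(an/2) limbs, and the high block b1 t = bn - n limbs; the ASSERTs pin s down to n or n-1 and 0 < t <= s, which is where the precondition bn <= an < 2bn is needed. A worked instance (not from the file), an = 7, bn = 5:

  #include <assert.h>

  int
  main (void)
  {
    long an = 7, bn = 5;
    long s = an >> 1;   /* s = 3: limbs in the high part a1 */
    long n = an - s;    /* n = 4: limbs in the low parts a0, b0 */
    long t = bn - n;    /* t = 1: limbs in the high part b1 */
    assert (s == 3 && n == 4 && t == 1);
    assert (0 < s && s <= n && s >= n - 1);
    assert (0 < t && t <= s);
    return 0;
  }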
 114   20.5k    asm1 = pp;
 115   20.5k    bsm1 = pp + n;
 116
 117   20.5k    vm1_neg = 0;
 118
 119            /* Compute asm1.  */
 120   20.5k    if (s == n)
 121   9.89k      {
 122   9.89k        if (mpn_cmp (a0, a1, n) < 0)
 123   3.59k          {
 124   3.59k            mpn_sub_n (asm1, a1, a0, n);
 125   3.59k            vm1_neg = 1;
 126   3.59k          }
 127   6.30k        else
 128   6.30k          {
 129   6.30k            mpn_sub_n (asm1, a0, a1, n);
 130   6.30k          }
 131   9.89k      }
 132   10.6k    else /* n - s == 1 */
 133   10.6k      {
 134   10.6k        if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
 135     371          {
 136     371            mpn_sub_n (asm1, a1, a0, s);
 137     371            asm1[s] = 0;
 138     371            vm1_neg = 1;
 139     371          }
 140   10.2k        else
 141   10.2k          {
 142   10.2k            asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
 143   10.2k          }
 144   10.6k      }
 145
 146            /* Compute bsm1.  */
 147   20.5k    if (t == n)
 148   8.38k      {
 149   8.38k        if (mpn_cmp (b0, b1, n) < 0)
 150   2.95k          {
 151   2.95k            mpn_sub_n (bsm1, b1, b0, n);
 152   2.95k            vm1_neg ^= 1;
 153   2.95k          }
 154   5.43k        else
 155   5.43k          {
 156   5.43k            mpn_sub_n (bsm1, b0, b1, n);
 157   5.43k          }
 158   8.38k      }
 159   12.1k    else
 160   12.1k      {
 161   12.1k        if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
 162     173          {
 163     173            mpn_sub_n (bsm1, b1, b0, t);
 164     173            MPN_ZERO (bsm1 + t, n - t);
 165     173            vm1_neg ^= 1;
 166     173          }
 167   11.9k        else
 168   11.9k          {
 169   11.9k            mpn_sub (bsm1, b0, n, b1, t);
 170   11.9k          }
 171   12.1k      }
 172
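Both branches above store absolute values, asm1 = |a0 - a1| and bsm1 = |b0 - b1|, with vm1_neg accumulating the XOR of the two signs, so the recursive multiplication below only ever sees nonnegative operands. The same bookkeeping on single words (an illustrative sketch, not GMP code):

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint64_t a0 = 3, a1 = 9, b0 = 7, b1 = 2;
    uint64_t asm1, bsm1;
    int vm1_neg = 0;

    if (a0 < a1) { asm1 = a1 - a0; vm1_neg ^= 1; }  /* a0 - a1 < 0 */
    else         { asm1 = a0 - a1; }
    if (b0 < b1) { bsm1 = b1 - b0; vm1_neg ^= 1; }  /* b0 - b1 < 0 */
    else         { bsm1 = b0 - b1; }

    /* vm1 = (a0-a1)*(b0-b1) = (-6)*5 = -30: magnitude 30, sign negative. */
    assert (asm1 * bsm1 == 30 && vm1_neg == 1);
    return 0;
  }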
 173   41.1k  #define v0  pp                          /* 2n */
 174   41.1k  #define vinf  (pp + 2 * n)              /* s+t */
 175   20.5k  #define vm1  scratch                    /* 2n */
 176   20.5k  #define scratch_out  scratch + 2 * n
 177
 178            /* vm1, 2n limbs */
 179   20.5k    TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
 180
 181   20.5k    if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
 182   16.9k    else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);
 183
 184            /* v0, 2n limbs */
 185   20.5k    TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);
 186
 187            /* H(v0) + L(vinf) */
 188   20.5k    cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);
 189
 190            /* L(v0) + H(v0) */
 191   20.5k    cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);
 192
 193            /* L(vinf) + H(vinf) */
 194   20.5k    cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);
 195
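After these three additions, and before the vm1 correction that follows, pp holds (a worked expansion, not part of the file; write x = 2^(n*GMP_NUMB_BITS), v0 = v0L + v0H*x and vinf = viL + viH*x, with the carries cy2 and cy still pending at x^2 and x^3):

  pp = v0L + (v0L + v0H + viL)*x + (v0H + viL + viH)*x^2 + viH*x^3
     = v0 + (v0 + vinf)*x + vinf*x^2

Applying vm1 at pp + n with its sign restored, i.e. adding vm1*x when vm1_neg is set and subtracting it otherwise, turns the middle coefficient into v0 + vinf minus the signed vm1, which is a0*b1 + a1*b0: exactly the interpolation identity from the top of the file.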
 196   20.5k    if (vm1_neg)
 197   4.10k      cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
 198   16.4k    else {
 199   16.4k      cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);
 200   16.4k      if (UNLIKELY (cy + 1 == 0)) { /* cy is negative */
 201                /* The total contribution of v0+vinf-vm1 can not be negative. */
 202       1  #if WANT_ASSERT
 203                /* The borrow in cy stops the propagation of the carry cy2, */
 204       1        ASSERT (cy2 == 1);
 205       1        cy += mpn_add_1 (pp + 2 * n, pp + 2 * n, n, cy2);
 206       1        ASSERT (cy == 0);
 207            #else
 208                /* we simply fill the area with zeros. */
 209                MPN_FILL (pp + 2 * n, n, 0);
 210            #endif
 211       1        return;
 212       1      }
 213   16.4k    }
 214
 215   20.5k    ASSERT (cy  <= 2);
 216   20.5k    ASSERT (cy2 <= 2);
 217
 218   20.5k    MPN_INCR_U (pp + 2 * n, s + t, cy2);
 219            /* if s+t==n, cy is zero, but we should not access pp[3*n] at all. */
 220   20.5k    MPN_INCR_U (pp + 3 * n, s + t - n, cy);
 221   20.5k  }
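As the warning at the top of the file says, mpn_toom22_mul is internal and only safe to reach through documented interfaces. User code gets here via mpz_mul or mpn_mul, which pick the Toom-2 range once operand sizes pass the platform-tuned MUL_TOOM22_THRESHOLD but stay below MUL_TOOM33_THRESHOLD. A minimal public-API driver (illustrative; the thresholds, and hence the internal path actually taken, vary by platform):

  #include <gmp.h>
  #include <stdio.h>

  int
  main (void)
  {
    mpz_t a, b, p;
    mpz_inits (a, b, p, NULL);

    /* A few thousand bits is typically well inside the Toom-2 range. */
    mpz_ui_pow_ui (a, 3, 1300);   /* ~2061 bits */
    mpz_ui_pow_ui (b, 5, 880);    /* ~2044 bits */
    mpz_mul (p, a, b);            /* dispatches internally by size */

    gmp_printf ("%Zd\n", p);
    mpz_clears (a, b, p, NULL);
    return 0;
  }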