Coverage Report

Created: 2025-12-31 06:37

/src/gmp/mpn/toom22_mul.c
  Line |   Count | Source
     1 |         | /* mpn_toom22_mul -- Multiply {ap,an} and {bp,bn} where an >= bn.  Or more
     2 |         |    accurately, bn <= an < 2bn.
     3 |         |
     4 |         |    Contributed to the GNU project by Torbjorn Granlund.
     5 |         |
     6 |         |    THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
     7 |         |    SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
     8 |         |    GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
     9 |         |
    10 |         | Copyright 2006-2010, 2012, 2014, 2018, 2020 Free Software Foundation, Inc.
    11 |         |
    12 |         | This file is part of the GNU MP Library.
    13 |         |
    14 |         | The GNU MP Library is free software; you can redistribute it and/or modify
    15 |         | it under the terms of either:
    16 |         |
    17 |         |   * the GNU Lesser General Public License as published by the Free
    18 |         |     Software Foundation; either version 3 of the License, or (at your
    19 |         |     option) any later version.
    20 |         |
    21 |         | or
    22 |         |
    23 |         |   * the GNU General Public License as published by the Free Software
    24 |         |     Foundation; either version 2 of the License, or (at your option) any
    25 |         |     later version.
    26 |         |
    27 |         | or both in parallel, as here.
    28 |         |
    29 |         | The GNU MP Library is distributed in the hope that it will be useful, but
    30 |         | WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
    31 |         | or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    32 |         | for more details.
    33 |         |
    34 |         | You should have received copies of the GNU General Public License and the
    35 |         | GNU Lesser General Public License along with the GNU MP Library.  If not,
    36 |         | see https://www.gnu.org/licenses/.  */
    37 |         |
    38 |         |
    39 |         | #include "gmp-impl.h"
    40 |         |
    41 |         | /* Evaluate in: -1, 0, +inf
    42 |         |
    43 |         |   <-s--><--n-->
    44 |         |    ____ ______
    45 |         |   |_a1_|___a0_|
    46 |         |    |b1_|___b0_|
    47 |         |    <-t-><--n-->
    48 |         |
    49 |         |   v0  =  a0     * b0       #   A(0)*B(0)
    50 |         |   vm1 = (a0- a1)*(b0- b1)  #  A(-1)*B(-1)
    51 |         |   vinf=      a1 *     b1   # A(inf)*B(inf)
    52 |         | */
    53 |         |
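The three point values above recombine into the full product by the usual Karatsuba interpolation. As a worked reference for the evaluation comment at lines 41-52 (an editorial sketch, not part of the source file), write x = beta^n for the limb base beta and keep A(-1)*B(-1) with its true sign:

    \begin{aligned}
    A &= a_1 x + a_0, \qquad B = b_1 x + b_0, \\
    A \cdot B &= v_{\mathrm{inf}}\, x^2
      + \bigl( v_0 + v_{\mathrm{inf}} - A(-1)\,B(-1) \bigr)\, x + v_0 .
    \end{aligned}

The code below stores vm1 = |a0 - a1| * |b0 - b1| and tracks the sign separately in vm1_neg, so the middle coefficient a0*b1 + a1*b0 equals v0 + vinf - vm1 when vm1_neg is 0 and v0 + vinf + vm1 when it is 1: three half-size multiplications replace four.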
    54 |         | #if TUNE_PROGRAM_BUILD || WANT_FAT_BINARY
    55 |         | #define MAYBE_mul_toom22   1
    56 |         | #else
    57 |         | #define MAYBE_mul_toom22                                        \
    58 |    246M |   (MUL_TOOM33_THRESHOLD >= 2 * MUL_TOOM22_THRESHOLD)
    59 |         | #endif
    60 |         |
    61 |         | #define TOOM22_MUL_N_REC(p, a, b, n, ws)                        \
    62 |    123M |   do {                                                          \
    63 |    123M |     if (! MAYBE_mul_toom22                                      \
    64 |    123M |         || BELOW_THRESHOLD (n, MUL_TOOM22_THRESHOLD))           \
    65 |    123M |       mpn_mul_basecase (p, a, n, b, n);                         \
    66 |    123M |     else                                                        \
    67 |    123M |       mpn_toom22_mul (p, a, n, b, n, ws);                       \
    68 |    123M |   } while (0)
    69 |         |
    70 |         | /* Normally, this calls mul_basecase or toom22_mul.  But when the fraction
    71 |         |    MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD is large, an initially small
    72 |         |    relative unbalance will become a larger and larger relative unbalance with
    73 |         |    each recursion (the difference s-t is invariant over recursive calls).
    74 |         |    Therefore, we need to call toom32_mul.  FIXME: Suppress depending on
    75 |         |    MUL_TOOM33_THRESHOLD / MUL_TOOM22_THRESHOLD and on MUL_TOOM22_THRESHOLD.  */
    76 |         | #define TOOM22_MUL_REC(p, a, an, b, bn, ws)                     \
    77 |   7.04k |   do {                                                          \
    78 |   7.04k |     if (! MAYBE_mul_toom22                                      \
    79 |   7.04k |         || BELOW_THRESHOLD (bn, MUL_TOOM22_THRESHOLD))          \
    80 |   7.04k |       mpn_mul_basecase (p, a, an, b, bn);                       \
    81 |   7.04k |     else if (4 * an < 5 * bn)                                   \
    82 |     596 |       mpn_toom22_mul (p, a, an, b, bn, ws);                     \
    83 |     596 |     else                                                        \
    84 |     596 |       mpn_toom32_mul (p, a, an, b, bn, ws);                     \
    85 |   7.04k |   } while (0)
    86 |         |
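The unbalance claim in the comment at lines 70-75 can be checked in one step from the operand splitting used by mpn_toom22_mul below (s = an >> 1, n = an - s, t = bn - n); a sketch, not part of the source file:

    s - t = (an - n) - (bn - n) = an - bn .

The absolute difference an - bn is therefore passed unchanged into the unbalanced recursive product {a1, s} x {b1, t} while the operand sizes roughly halve, so the relative unbalance s/t grows at each level; once 4 * an >= 5 * bn, the macro hands off to mpn_toom32_mul, which handles the lopsided shape directly.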
    87 |         | void
    88 |         | mpn_toom22_mul (mp_ptr pp,
    89 |         |                 mp_srcptr ap, mp_size_t an,
    90 |         |                 mp_srcptr bp, mp_size_t bn,
    91 |         |                 mp_ptr scratch)
    92 |   41.0M | {
    93 |   41.0M |   const int __gmpn_cpuvec_initialized = 1;
    94 |   41.0M |   mp_size_t n, s, t;
    95 |   41.0M |   int vm1_neg;
    96 |   41.0M |   mp_limb_t cy, cy2;
    97 |   41.0M |   mp_ptr asm1;
    98 |   41.0M |   mp_ptr bsm1;
    99 |         |
   100 |   97.7M | #define a0  ap
   101 |   67.2M | #define a1  (ap + n)
   102 |   84.2M | #define b0  bp
   103 |   68.9M | #define b1  (bp + n)
   104 |         |
   105 |   41.0M |   s = an >> 1;
   106 |   41.0M |   n = an - s;
   107 |   41.0M |   t = bn - n;
   108 |         |
   109 |   41.0M |   ASSERT (an >= bn);
   110 |         |
   111 |   41.0M |   ASSERT (0 < s && s <= n && (n - s) == (an & 1));
   112 |   41.0M |   ASSERT (0 < t && t <= s);
   113 |         |
   114 |   41.0M |   asm1 = pp;
   115 |   41.0M |   bsm1 = pp + n;
   116 |         |
   117 |   41.0M |   vm1_neg = 0;
   118 |         |
   119 |         |   /* Compute asm1.  */
   120 |   41.0M |   if ((an & 1) == 0) /* s == n */
   121 |   25.6M |     {
   122 |   25.6M |       if (mpn_cmp (a0, a1, n) < 0)
   123 |   10.6M |         {
   124 |   10.6M |           mpn_sub_n (asm1, a1, a0, n);
   125 |   10.6M |           vm1_neg = 1;
   126 |   10.6M |         }
   127 |   15.0M |       else
   128 |   15.0M |         {
   129 |   15.0M |           mpn_sub_n (asm1, a0, a1, n);
   130 |   15.0M |         }
   131 |   25.6M |     }
   132 |   15.3M |   else /* n - s == 1 */
   133 |   15.3M |     {
   134 |   15.3M |       if (a0[s] == 0 && mpn_cmp (a0, a1, s) < 0)
   135 |    202k |         {
   136 |    202k |           mpn_sub_n (asm1, a1, a0, s);
   137 |    202k |           asm1[s] = 0;
   138 |    202k |           vm1_neg = 1;
   139 |    202k |         }
   140 |   15.1M |       else
   141 |   15.1M |         {
   142 |   15.1M |           asm1[s] = a0[s] - mpn_sub_n (asm1, a0, a1, s);
   143 |   15.1M |         }
   144 |   15.3M |     }
   145 |         |
   146 |         |   /* Compute bsm1.  */
   147 |   41.0M |   if (t == n)
   148 |   25.6M |     {
   149 |   25.6M |       if (mpn_cmp (b0, b1, n) < 0)
   150 |   12.0M |         {
   151 |   12.0M |           mpn_sub_n (bsm1, b1, b0, n);
   152 |   12.0M |           vm1_neg ^= 1;
   153 |   12.0M |         }
   154 |   13.6M |       else
   155 |   13.6M |         {
   156 |   13.6M |           mpn_sub_n (bsm1, b0, b1, n);
   157 |   13.6M |         }
   158 |   25.6M |     }
   159 |   15.3M |   else
   160 |   15.3M |     {
   161 |   15.3M |       if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
   162 |   1.14M |         {
   163 |   1.14M |           mpn_sub_n (bsm1, b1, b0, t);
   164 |   1.14M |           MPN_ZERO (bsm1 + t, n - t);
   165 |   1.14M |           vm1_neg ^= 1;
   166 |   1.14M |         }
   167 |   14.1M |       else
   168 |   14.1M |         {
   169 |   14.1M |           mpn_sub (bsm1, b0, n, b1, t);
   170 |   14.1M |         }
   171 |   15.3M |     }
   172 |         |
   173 |   82.0M | #define v0    pp                /* 2n */
   174 |   82.0M | #define vinf  (pp + 2 * n)      /* s+t */
   175 |   41.0M | #define vm1   scratch           /* 2n */
   176 |   41.0M | #define scratch_out  scratch + 2 * n
   177 |         |
   178 |         |   /* vm1, 2n limbs */
   179 |   41.0M |   TOOM22_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
   180 |         |
   181 |   41.0M |   if (s > t)  TOOM22_MUL_REC (vinf, a1, s, b1, t, scratch_out);
   182 |   41.0M |   else        TOOM22_MUL_N_REC (vinf, a1, b1, s, scratch_out);
   183 |         |
   184 |         |   /* v0, 2n limbs */
   185 |   41.0M |   TOOM22_MUL_N_REC (v0, ap, bp, n, scratch_out);
   186 |         |
   187 |         |   /* H(v0) + L(vinf) */
   188 |   41.0M |   cy = mpn_add_n (pp + 2 * n, v0 + n, vinf, n);
   189 |         |
   190 |         |   /* L(v0) + (H(v0) + L(vinf)) */
   191 |   41.0M |   cy2 = cy + mpn_add_n (pp + n, pp + 2 * n, v0, n);
   192 |         |
   193 |         |   /* (H(v0) + L(vinf)) + H(vinf) */
   194 |   41.0M |   cy += mpn_add (pp + 2 * n, pp + 2 * n, n, vinf + n, s + t - n);
   195 |         |
   196 |   41.0M |   if (vm1_neg)
   197 |   12.2M |     cy += mpn_add_n (pp + n, pp + n, vm1, 2 * n);
   198 |   28.7M |   else {
   199 |   28.7M |     cy -= mpn_sub_n (pp + n, pp + n, vm1, 2 * n);
   200 |   28.7M |     if (UNLIKELY (cy + 1 == 0)) { /* cy is negative */
   201 |         |       /* The total contribution of v0+vinf-vm1 cannot be negative. */
   202 |         | #if WANT_ASSERT
   203 |         |       /* The borrow in cy stops the propagation of the carry cy2. */
   204 |         |       ASSERT (cy2 == 1);
   205 |         |       cy += mpn_add_1 (pp + 2 * n, pp + 2 * n, n, cy2);
   206 |         |       ASSERT (cy == 0);
   207 |         | #else
   208 |         |       /* we simply fill the area with zeros. */
   209 |   3.00k |       MPN_FILL (pp + 2 * n, n, 0);
   210 |         |       /* ASSERT (s + t == n || mpn_zero_p (pp + 3 * n, s + t - n)); */
   211 |   3.00k | #endif
   212 |   3.00k |       return;
   213 |   3.00k |     }
   214 |   28.7M |   }
   215 |         |
   216 |   41.0M |   ASSERT (cy  <= 2);
   217 |   41.0M |   ASSERT (cy2 <= 2);
   218 |         |
   219 |   41.0M |   MPN_INCR_U (pp + 2 * n, s + t, cy2);
   220 |         |   /* if s+t==n, cy is zero, but we should not access pp[3*n] at all. */
   221 |   41.0M |   MPN_INCR_U (pp + 3 * n, s + t - n, cy);
   222 |   41.0M | }
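For a concrete end-to-end picture of the evaluation, the vm1_neg sign tracking, and the recombination performed at lines 187-221, here is a self-contained single-step sketch on machine words (an editorial illustration, not GMP code; karatsuba32 and all other names are hypothetical), splitting 32-bit operands at 16 bits the way mpn_toom22_mul splits at n limbs:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* One Karatsuba step on 32-bit words, mirroring v0/vm1/vinf above.  */
  static uint64_t
  karatsuba32 (uint32_t a, uint32_t b)
  {
    uint32_t a0 = a & 0xffff, a1 = a >> 16;   /* low and high halves of A */
    uint32_t b0 = b & 0xffff, b1 = b >> 16;   /* low and high halves of B */

    uint32_t v0   = a0 * b0;                  /* A(0)   * B(0)   */
    uint32_t vinf = a1 * b1;                  /* A(inf) * B(inf) */

    /* vm1 = |a0-a1| * |b0-b1|; vm1_neg records whether A(-1)*B(-1) is
       negative, as the mpn_cmp/mpn_sub_n branches do for limb vectors.  */
    int vm1_neg = (a0 < a1) ^ (b0 < b1);
    uint32_t vm1 = (a0 < a1 ? a1 - a0 : a0 - a1)
                   * (b0 < b1 ? b1 - b0 : b0 - b1);

    /* Middle coefficient a0*b1 + a1*b0 = v0 + vinf -/+ vm1; it can need
       33 bits, hence the 64-bit accumulator (the limb code tracks that
       overflow in cy/cy2 instead).  */
    uint64_t mid = (uint64_t) v0 + vinf;
    mid = vm1_neg ? mid + vm1 : mid - vm1;

    /* A*B = vinf*2^32 + mid*2^16 + v0, the interpolation identity.  */
    return ((uint64_t) vinf << 32) + (mid << 16) + v0;
  }

  int
  main (void)
  {
    uint32_t a = 0xdeadbeef, b = 0x12345678;
    assert (karatsuba32 (a, b) == (uint64_t) a * b);   /* exact product */
    printf ("%llx\n", (unsigned long long) karatsuba32 (a, b));
    return 0;
  }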