Coverage Report

Created: 2025-12-31 06:37

/src/gmp/mpn/gcdext_lehmer.c
 Line |  Count | Source
    1 |        | /* mpn_gcdext -- Extended Greatest Common Divisor.
    2 |        |
    3 |        | Copyright 1996, 1998, 2000-2005, 2008, 2009, 2012 Free Software Foundation,
    4 |        | Inc.
    5 |        |
    6 |        | This file is part of the GNU MP Library.
    7 |        |
    8 |        | The GNU MP Library is free software; you can redistribute it and/or modify
    9 |        | it under the terms of either:
   10 |        |
   11 |        |   * the GNU Lesser General Public License as published by the Free
   12 |        |     Software Foundation; either version 3 of the License, or (at your
   13 |        |     option) any later version.
   14 |        |
   15 |        | or
   16 |        |
   17 |        |   * the GNU General Public License as published by the Free Software
   18 |        |     Foundation; either version 2 of the License, or (at your option) any
   19 |        |     later version.
   20 |        |
   21 |        | or both in parallel, as here.
   22 |        |
   23 |        | The GNU MP Library is distributed in the hope that it will be useful, but
   24 |        | WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   25 |        | or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   26 |        | for more details.
   27 |        |
   28 |        | You should have received copies of the GNU General Public License and the
   29 |        | GNU Lesser General Public License along with the GNU MP Library.  If not,
   30 |        | see https://www.gnu.org/licenses/.  */
   31 |        |
   32 |        | #include "gmp-impl.h"
   33 |        | #include "longlong.h"
   34 |        |
   35 |        | /* Here, d is the index of the cofactor to update. FIXME: Could use qn
   36 |        |    = 0 for the common case q = 1. */
   37 |        | void
   38 |        | mpn_gcdext_hook (void *p, mp_srcptr gp, mp_size_t gn,
   39 |        |                  mp_srcptr qp, mp_size_t qn, int d)
   40 |  17.2k | {
   41 |  17.2k |   struct gcdext_ctx *ctx = (struct gcdext_ctx *) p;
   42 |  17.2k |   mp_size_t un = ctx->un;
   43 |        |
   44 |  17.2k |   if (gp)
   45 |    386 |     {
   46 |    386 |       mp_srcptr up;
   47 |        |
   48 |    386 |       ASSERT (gn > 0);
   49 |    386 |       ASSERT (gp[gn-1] > 0);
   50 |        |
   51 |    386 |       MPN_COPY (ctx->gp, gp, gn);
   52 |    386 |       ctx->gn = gn;
   53 |        |
   54 |    386 |       if (d < 0)
   55 |    166 |         {
   56 |    166 |           int c;
   57 |        |
   58 |        |           /* Must return the smallest cofactor, +u1 or -u0 */
   59 |    166 |           MPN_CMP (c, ctx->u0, ctx->u1, un);
   60 |    166 |           ASSERT (c != 0 || (un == 1 && ctx->u0[0] == 1 && ctx->u1[0] == 1));
   61 |        |
   62 |    166 |           d = c < 0;
   63 |    166 |         }
   64 |        |
   65 |    386 |       up = d ? ctx->u0 : ctx->u1;
   66 |        |
   67 |    386 |       MPN_NORMALIZE (up, un);
   68 |    386 |       MPN_COPY (ctx->up, up, un);
   69 |        |
   70 |    386 |       *ctx->usize = d ? -un : un;
   71 |    386 |     }
   72 |  16.8k |   else
   73 |  16.8k |     {
   74 |  16.8k |       mp_limb_t cy;
   75 |  16.8k |       mp_ptr u0 = ctx->u0;
   76 |  16.8k |       mp_ptr u1 = ctx->u1;
   77 |        |
   78 |  16.8k |       ASSERT (d >= 0);
   79 |        |
   80 |  16.8k |       if (d)
   81 |  7.86k |         MP_PTR_SWAP (u0, u1);
   82 |        |
   83 |  16.8k |       qn -= (qp[qn-1] == 0);
   84 |        |
   85 |        |       /* Update u0 += q * u1 */
   86 |  16.8k |       if (qn == 1)
   87 |  11.0k |         {
   88 |  11.0k |           mp_limb_t q = qp[0];
   89 |        |
   90 |  11.0k |           if (q == 1)
   91 |        |             /* A common case. */
   92 |  8.55k |             cy = mpn_add_n (u0, u0, u1, un);
   93 |  2.48k |           else
   94 |  2.48k |             cy = mpn_addmul_1 (u0, u1, un, q);
   95 |  11.0k |         }
   96 |  5.84k |       else
   97 |  5.84k |         {
   98 |  5.84k |           mp_size_t u1n;
   99 |  5.84k |           mp_ptr tp;
  100 |        |
  101 |  5.84k |           u1n = un;
  102 |  5.84k |           MPN_NORMALIZE (u1, u1n);
  103 |        |
  104 |  5.84k |           if (u1n == 0)
  105 |      0 |             return;
  106 |        |
  107 |        |           /* Should always have u1n == un here, and u1 >= u0. The
  108 |        |              reason is that we alternate adding u0 to u1 and u1 to u0
  109 |        |              (corresponding to subtractions a - b and b - a), and we
  110 |        |              can get a large quotient only just after a switch, which
  111 |        |              means that we'll add (a multiple of) the larger u to the
  112 |        |              smaller. */
  113 |        |
  114 |  5.84k |           tp = ctx->tp;
  115 |        |
  116 |  5.84k |           if (qn > u1n)
  117 |  1.59k |             mpn_mul (tp, qp, qn, u1, u1n);
  118 |  4.25k |           else
  119 |  4.25k |             mpn_mul (tp, u1, u1n, qp, qn);
  120 |        |
  121 |  5.84k |           u1n += qn;
  122 |  5.84k |           u1n -= tp[u1n-1] == 0;
  123 |        |
  124 |  5.84k |           if (u1n >= un)
  125 |  5.84k |             {
  126 |  5.84k |               cy = mpn_add (u0, tp, u1n, u0, un);
  127 |  5.84k |               un = u1n;
  128 |  5.84k |             }
  129 |      0 |           else
  130 |        |             /* Note: Unlikely case, maybe never happens? */
  131 |      0 |             cy = mpn_add (u0, u0, un, tp, u1n);
  132 |        |
  133 |  5.84k |         }
  134 |  16.8k |       u0[un] = cy;
  135 |  16.8k |       ctx->un = un + (cy > 0);
  136 |  16.8k |     }
  137 |  17.2k | }
  138 |        |
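The hook above is where the extended-GCD cofactors get maintained while mpn_gcd_subdiv_step reduces a and b. The following single-word sketch models that bookkeeping; it is not GMP code, and the names word_gcdext, A, B, u0, u1 and the test values are local to the sketch. After each quotient q, the cofactor of the operand being reduced grows by q times the other cofactor, which is the "u0 += q * u1" update in the listing, with the d argument selecting which of the two cofactors plays that role.

/* Illustrative model only -- not GMP code.  Single-word extended GCD
   keeping the invariant
       a ==  u1*A (mod B),   b == -u0*A (mod B),
   the same invariant the multi-limb code keeps for its cofactor rows.
   Small inputs only; no overflow handling.  */

#include <assert.h>
#include <stdio.h>

static unsigned long
word_gcdext (long *s, unsigned long A, unsigned long B)
{
  unsigned long a = A, b = B;
  unsigned long u1 = 1, u0 = 0;   /* cofactor magnitudes */

  while (a != 0 && b != 0)
    {
      if (a >= b)
        {
          unsigned long q = a / b;   /* quotient of this reduction step */
          a -= q * b;
          u1 += q * u0;   /* the "u0 += q * u1" update, roles picked by d */
        }
      else
        {
          unsigned long q = b / a;
          b -= q * a;
          u0 += q * u1;
        }
    }

  /* Which remainder survived decides which cofactor (and sign) to report,
     much as the hook encodes the sign in *ctx->usize.  */
  if (b == 0)
    {
      *s = (long) u1;      /* gcd ==  u1*A (mod B) */
      return a;
    }
  *s = -(long) u0;         /* gcd == -u0*A (mod B) */
  return b;
}

int
main (void)
{
  long s;
  unsigned long g = word_gcdext (&s, 12378, 3054);

  printf ("gcd = %lu, cofactor = %ld\n", g, s);
  assert (g == 6);
  /* Check s*A == g (mod B), the congruence stated before mpn_gcdext_lehmer_n. */
  assert (((long long) s * 12378 - (long long) g) % 3054 == 0);
  return 0;
}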
  139 |        | /* Temporary storage: 3*(n+1) for u. If hgcd2 succeeds, we need n for
  140 |        |    the matrix-vector multiplication adjusting a, b. If hgcd fails, we
  141 |        |    need at most n for the quotient and n+1 for the u update (reusing
  142 |        |    the extra u). In all, 4n + 3. */
  143 |        |
  144 |        | mp_size_t
  145 |        | mpn_gcdext_lehmer_n (mp_ptr gp, mp_ptr up, mp_size_t *usize,
  146 |        |                      mp_ptr ap, mp_ptr bp, mp_size_t n,
  147 |        |                      mp_ptr tp)
  148 |  38.6k | {
  149 |  38.6k |   mp_size_t ualloc = n + 1;
  150 |        |
  151 |        |   /* Keeps track of the second row of the reduction matrix
  152 |        |    *
  153 |        |    *   M = (v0, v1 ; u0, u1)
  154 |        |    *
  155 |        |    * which corresponds to the first column of the inverse
  156 |        |    *
  157 |        |    *   M^{-1} = (u1, -v1; -u0, v0)
  158 |        |    *
  159 |        |    * This implies that
  160 |        |    *
  161 |        |    *   a =  u1 A (mod B)
  162 |        |    *   b = -u0 A (mod B)
  163 |        |    *
  164 |        |    * where A, B denote the input values.
  165 |        |    */
  166 |        |
  167 |  38.6k |   struct gcdext_ctx ctx;
  168 |  38.6k |   mp_size_t un;
  169 |  38.6k |   mp_ptr u0;
  170 |  38.6k |   mp_ptr u1;
  171 |  38.6k |   mp_ptr u2;
  172 |        |
  173 |  38.6k |   MPN_ZERO (tp, 3*ualloc);
  174 |  38.6k |   u0 = tp; tp += ualloc;
  175 |  38.6k |   u1 = tp; tp += ualloc;
  176 |  38.6k |   u2 = tp; tp += ualloc;
  177 |        |
  178 |  38.6k |   u1[0] = 1; un = 1;
  179 |        |
  180 |  38.6k |   ctx.gp = gp;
  181 |  38.6k |   ctx.up = up;
  182 |  38.6k |   ctx.usize = usize;
  183 |        |
  184 |        |   /* FIXME: Handle n == 2 differently, after the loop? */
  185 |   695k |   while (n >= 2)
  186 |   657k |     {
  187 |   657k |       struct hgcd_matrix1 M;
  188 |   657k |       mp_limb_t ah, al, bh, bl;
  189 |   657k |       mp_limb_t mask;
  190 |        |
  191 |   657k |       mask = ap[n-1] | bp[n-1];
  192 |   657k |       ASSERT (mask > 0);
  193 |        |
  194 |   657k |       if (mask & GMP_NUMB_HIGHBIT)
  195 |  28.4k |         {
  196 |  28.4k |           ah = ap[n-1]; al = ap[n-2];
  197 |  28.4k |           bh = bp[n-1]; bl = bp[n-2];
  198 |  28.4k |         }
  199 |   628k |       else if (n == 2)
  200 |  35.1k |         {
  201 |        |           /* We use the full inputs without truncation, so we can
  202 |        |              safely shift left. */
  203 |  35.1k |           int shift;
  204 |        |
  205 |  35.1k |           count_leading_zeros (shift, mask);
  206 |  35.1k |           ah = MPN_EXTRACT_NUMB (shift, ap[1], ap[0]);
  207 |  35.1k |           al = ap[0] << shift;
  208 |  35.1k |           bh = MPN_EXTRACT_NUMB (shift, bp[1], bp[0]);
  209 |  35.1k |           bl = bp[0] << shift;
  210 |  35.1k |         }
  211 |   593k |       else
  212 |   593k |         {
  213 |   593k |           int shift;
  214 |        |
  215 |   593k |           count_leading_zeros (shift, mask);
  216 |   593k |           ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
  217 |   593k |           al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
  218 |   593k |           bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
  219 |   593k |           bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
  220 |   593k |         }
  221 |        |
  222 |        |       /* Try an mpn_hgcd2 step */
  223 |   657k |       if (mpn_hgcd2 (ah, al, bh, bl, &M))
  224 |   648k |         {
  225 |   648k |           n = mpn_matrix22_mul1_inverse_vector (&M, tp, ap, bp, n);
  226 |   648k |           MP_PTR_SWAP (ap, tp);
  227 |   648k |           un = mpn_hgcd_mul_matrix1_vector(&M, u2, u0, u1, un);
  228 |   648k |           MP_PTR_SWAP (u0, u2);
  229 |   648k |         }
  230 |  8.72k |       else
  231 |  8.72k |         {
  232 |        |           /* mpn_hgcd2 has failed. Then either one of a or b is very
  233 |        |              small, or the difference is very small. Perform one
  234 |        |              subtraction followed by one division. */
  235 |  8.72k |           ctx.u0 = u0;
  236 |  8.72k |           ctx.u1 = u1;
  237 |  8.72k |           ctx.tp = u2;
  238 |  8.72k |           ctx.un = un;
  239 |        |
  240 |        |           /* Temporary storage n for the quotient and ualloc for the
  241 |        |              new cofactor. */
  242 |  8.72k |           n = mpn_gcd_subdiv_step (ap, bp, n, 0, mpn_gcdext_hook, &ctx, tp);
  243 |  8.72k |           if (n == 0)
  244 |    386 |             return ctx.gn;
  245 |        |
  246 |  8.33k |           un = ctx.un;
  247 |  8.33k |         }
  248 |   657k |     }
  249 |  38.2k |   ASSERT_ALWAYS (ap[0] > 0);
  250 |  38.2k |   ASSERT_ALWAYS (bp[0] > 0);
  251 |        |
  252 |  38.2k |   if (ap[0] == bp[0])
  253 |  1.07k |     {
  254 |  1.07k |       int c;
  255 |        |
  256 |        |       /* Which cofactor to return now? Candidates are +u1 and -u0,
  257 |        |          depending on which of a and b was most recently reduced,
  258 |        |          which we don't keep track of. So compare and get the smallest
  259 |        |          one. */
  260 |        |
  261 |  1.07k |       gp[0] = ap[0];
  262 |        |
  263 |  1.07k |       MPN_CMP (c, u0, u1, un);
  264 |  1.07k |       ASSERT (c != 0 || (un == 1 && u0[0] == 1 && u1[0] == 1));
  265 |  1.07k |       if (c < 0)
  266 |    555 |         {
  267 |    555 |           MPN_NORMALIZE (u0, un);
  268 |    555 |           MPN_COPY (up, u0, un);
  269 |    555 |           *usize = -un;
  270 |    555 |         }
  271 |    520 |       else
  272 |    520 |         {
  273 |    520 |           MPN_NORMALIZE_NOT_ZERO (u1, un);
  274 |    520 |           MPN_COPY (up, u1, un);
  275 |    520 |           *usize = un;
  276 |    520 |         }
  277 |  1.07k |       return 1;
  278 |  1.07k |     }
  279 |  37.1k |   else
  280 |  37.1k |     {
  281 |  37.1k |       mp_limb_t uh, vh;
  282 |  37.1k |       mp_limb_signed_t u;
  283 |  37.1k |       mp_limb_signed_t v;
  284 |  37.1k |       int negate;
  285 |        |
  286 |  37.1k |       gp[0] = mpn_gcdext_1 (&u, &v, ap[0], bp[0]);
  287 |        |
  288 |        |       /* Set up = u u1 - v u0. Keep track of size, un grows by one or
  289 |        |          two limbs. */
  290 |        |
  291 |  37.1k |       if (u == 0)
  292 |     79 |         {
  293 |     79 |           ASSERT (v == 1);
  294 |     79 |           MPN_NORMALIZE (u0, un);
  295 |     79 |           MPN_COPY (up, u0, un);
  296 |     79 |           *usize = -un;
  297 |     79 |           return 1;
  298 |     79 |         }
  299 |  37.0k |       else if (v == 0)
  300 |    447 |         {
  301 |    447 |           ASSERT (u == 1);
  302 |    447 |           MPN_NORMALIZE (u1, un);
  303 |    447 |           MPN_COPY (up, u1, un);
  304 |    447 |           *usize = un;
  305 |    447 |           return 1;
  306 |    447 |         }
  307 |  36.6k |       else if (u > 0)
  308 |  29.2k |         {
  309 |  29.2k |           negate = 0;
  310 |  29.2k |           ASSERT (v < 0);
  311 |  29.2k |           v = -v;
  312 |  29.2k |         }
  313 |  7.38k |       else
  314 |  7.38k |         {
  315 |  7.38k |           negate = 1;
  316 |  7.38k |           ASSERT (v > 0);
  317 |  7.38k |           u = -u;
  318 |  7.38k |         }
  319 |        |
  320 |  36.6k |       uh = mpn_mul_1 (up, u1, un, u);
  321 |  36.6k |       vh = mpn_addmul_1 (up, u0, un, v);
  322 |        |
  323 |  36.6k |       if ( (uh | vh) > 0)
  324 |  4.54k |         {
  325 |  4.54k |           uh += vh;
  326 |  4.54k |           up[un++] = uh;
  327 |  4.54k |           if (uh < vh)
  328 |      0 |             up[un++] = 1;
  329 |  4.54k |         }
  330 |        |
  331 |  36.6k |       MPN_NORMALIZE_NOT_ZERO (up, un);
  332 |        |
  333 |  36.6k |       *usize = negate ? -un : un;
  334 |  36.6k |       return 1;
  335 |  37.1k |     }
  336 |  38.2k | }
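A numeric sanity check of the matrix comment at source line 151: assuming det M = 1 (as for the 2x2 matrices produced by mpn_hgcd2), the inverse is the adjugate (u1, -v1; -u0, v0), so reading off its rows gives a = u1*A - v1*B and b = -u0*A + v0*B, which is where the two congruences come from. The concrete values below are arbitrary and only illustrate the identity; nothing here is GMP API.

/* Illustrative check of:  if (A;B) = M (a;b), M = (v0, v1; u0, u1), det M = 1,
   then M^{-1} = (u1, -v1; -u0, v0), hence
       a ==  u1*A (mod B)  and  b == -u0*A (mod B).  */

#include <assert.h>

int
main (void)
{
  long v0 = 3, v1 = 4, u0 = 2, u1 = 3;   /* det = v0*u1 - v1*u0 = 1 */
  long a = 5, b = 3;

  long A = v0*a + v1*b;                  /* (A;B) = M (a;b) */
  long B = u0*a + u1*b;

  assert (v0*u1 - v1*u0 == 1);
  assert (a ==  u1*A - v1*B);            /* first row of M^{-1} (A;B) */
  assert (b == -u0*A + v0*B);            /* second row */
  assert (( u1*A - a) % B == 0);         /* a ==  u1*A (mod B) */
  assert ((-u0*A - b) % B == 0);         /* b == -u0*A (mod B) */
  return 0;
}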
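Both functions in this report are internal mpn-level building blocks; portable application code does not call them directly and instead uses the documented mpz_gcdext interface, which provides the same gcd and cofactors. A minimal usage sketch follows (the operand strings are arbitrary); it verifies Bezout's identity g = s*a + t*b and is built with: cc example.c -lgmp

/* Minimal mpz_gcdext usage sketch; operands are arbitrary test values. */

#include <assert.h>
#include <gmp.h>

int
main (void)
{
  mpz_t a, b, g, s, t, chk;

  mpz_inits (a, b, g, s, t, chk, NULL);
  mpz_set_str (a, "123456789012345678901234567890", 10);
  mpz_set_str (b, "987654321098765432109876543210", 10);

  /* g = gcd(a,b) and cofactors with g = s*a + t*b. */
  mpz_gcdext (g, s, t, a, b);

  mpz_mul (chk, s, a);        /* chk = s*a       */
  mpz_addmul (chk, t, b);     /* chk = s*a + t*b */
  assert (mpz_cmp (chk, g) == 0);

  gmp_printf ("g = %Zd\ns = %Zd\nt = %Zd\n", g, s, t);

  mpz_clears (a, b, g, s, t, chk, NULL);
  return 0;
}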