Coverage Report

Created: 2025-11-16 06:46

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/gmp/mpn/gcdext_lehmer.c
Line
Count
Source
1
/* mpn_gcdext -- Extended Greatest Common Divisor.
2
3
Copyright 1996, 1998, 2000-2005, 2008, 2009, 2012 Free Software Foundation,
4
Inc.
5
6
This file is part of the GNU MP Library.
7
8
The GNU MP Library is free software; you can redistribute it and/or modify
9
it under the terms of either:
10
11
  * the GNU Lesser General Public License as published by the Free
12
    Software Foundation; either version 3 of the License, or (at your
13
    option) any later version.
14
15
or
16
17
  * the GNU General Public License as published by the Free Software
18
    Foundation; either version 2 of the License, or (at your option) any
19
    later version.
20
21
or both in parallel, as here.
22
23
The GNU MP Library is distributed in the hope that it will be useful, but
24
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
25
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
26
for more details.
27
28
You should have received copies of the GNU General Public License and the
29
GNU Lesser General Public License along with the GNU MP Library.  If not,
30
see https://www.gnu.org/licenses/.  */
31
32
#include "gmp-impl.h"
33
#include "longlong.h"
34
35
/* Here, d is the index of the cofactor to update. FIXME: Could use qn
36
   = 0 for the common case q = 1. */
37
void
38
mpn_gcdext_hook (void *p, mp_srcptr gp, mp_size_t gn,
39
     mp_srcptr qp, mp_size_t qn, int d)
40
16.0k
{
41
16.0k
  struct gcdext_ctx *ctx = (struct gcdext_ctx *) p;
42
16.0k
  mp_size_t un = ctx->un;
43
44
16.0k
  if (gp)
45
375
    {
46
375
      mp_srcptr up;
47
48
375
      ASSERT (gn > 0);
49
375
      ASSERT (gp[gn-1] > 0);
50
51
375
      MPN_COPY (ctx->gp, gp, gn);
52
375
      ctx->gn = gn;
53
54
375
      if (d < 0)
55
168
  {
56
168
    int c;
57
58
    /* Must return the smallest cofactor, +u1 or -u0 */
59
168
    MPN_CMP (c, ctx->u0, ctx->u1, un);
60
168
    ASSERT (c != 0 || (un == 1 && ctx->u0[0] == 1 && ctx->u1[0] == 1));
61
62
168
    d = c < 0;
63
168
  }
64
65
375
      up = d ? ctx->u0 : ctx->u1;
66
67
375
      MPN_NORMALIZE (up, un);
68
375
      MPN_COPY (ctx->up, up, un);
69
70
375
      *ctx->usize = d ? -un : un;
71
375
    }
72
15.6k
  else
73
15.6k
    {
74
15.6k
      mp_limb_t cy;
75
15.6k
      mp_ptr u0 = ctx->u0;
76
15.6k
      mp_ptr u1 = ctx->u1;
77
78
15.6k
      ASSERT (d >= 0);
79
80
15.6k
      if (d)
81
7.34k
  MP_PTR_SWAP (u0, u1);
82
83
15.6k
      qn -= (qp[qn-1] == 0);
84
85
      /* Update u0 += q  * u1 */
86
15.6k
      if (qn == 1)
87
10.3k
  {
88
10.3k
    mp_limb_t q = qp[0];
89
90
10.3k
    if (q == 1)
91
      /* A common case. */
92
7.93k
      cy = mpn_add_n (u0, u0, u1, un);
93
2.37k
    else
94
2.37k
      cy = mpn_addmul_1 (u0, u1, un, q);
95
10.3k
  }
96
5.35k
      else
97
5.35k
  {
98
5.35k
    mp_size_t u1n;
99
5.35k
    mp_ptr tp;
100
101
5.35k
    u1n = un;
102
5.35k
    MPN_NORMALIZE (u1, u1n);
103
104
5.35k
    if (u1n == 0)
105
0
      return;
106
107
    /* Should always have u1n == un here, and u1 >= u0. The
108
       reason is that we alternate adding u0 to u1 and u1 to u0
109
       (corresponding to subtractions a - b and b - a), and we
110
       can get a large quotient only just after a switch, which
111
       means that we'll add (a multiple of) the larger u to the
112
       smaller. */
113
114
5.35k
    tp = ctx->tp;
115
116
5.35k
    if (qn > u1n)
117
1.38k
      mpn_mul (tp, qp, qn, u1, u1n);
118
3.97k
    else
119
3.97k
      mpn_mul (tp, u1, u1n, qp, qn);
120
121
5.35k
    u1n += qn;
122
5.35k
    u1n -= tp[u1n-1] == 0;
123
124
5.35k
    if (u1n >= un)
125
5.35k
      {
126
5.35k
        cy = mpn_add (u0, tp, u1n, u0, un);
127
5.35k
        un = u1n;
128
5.35k
      }
129
0
    else
130
      /* Note: Unlikely case, maybe never happens? */
131
0
      cy = mpn_add (u0, u0, un, tp, u1n);
132
133
5.35k
  }
134
15.6k
      u0[un] = cy;
135
15.6k
      ctx->un = un + (cy > 0);
136
15.6k
    }
137
16.0k
}
138
139
/* Temporary storage: 3*(n+1) for u. If hgcd2 succeeds, we need n for
140
   the matrix-vector multiplication adjusting a, b. If hgcd fails, we
141
   need at most n for the quotient and n+1 for the u update (reusing
142
   the extra u). In all, 4n + 3. */
143
144
mp_size_t
145
mpn_gcdext_lehmer_n (mp_ptr gp, mp_ptr up, mp_size_t *usize,
146
         mp_ptr ap, mp_ptr bp, mp_size_t n,
147
         mp_ptr tp)
148
36.2k
{
149
36.2k
  mp_size_t ualloc = n + 1;
150
151
  /* Keeps track of the second row of the reduction matrix
152
   *
153
   *   M = (v0, v1 ; u0, u1)
154
   *
155
   * which correspond to the first column of the inverse
156
   *
157
   *   M^{-1} = (u1, -v1; -u0, v0)
158
   *
159
   * This implies that
160
   *
161
   *   a =  u1 A (mod B)
162
   *   b = -u0 A (mod B)
163
   *
164
   * where A, B denotes the input values.
165
   */
166
167
36.2k
  struct gcdext_ctx ctx;
168
36.2k
  mp_size_t un;
169
36.2k
  mp_ptr u0;
170
36.2k
  mp_ptr u1;
171
36.2k
  mp_ptr u2;
172
173
36.2k
  MPN_ZERO (tp, 3*ualloc);
174
36.2k
  u0 = tp; tp += ualloc;
175
36.2k
  u1 = tp; tp += ualloc;
176
36.2k
  u2 = tp; tp += ualloc;
177
178
36.2k
  u1[0] = 1; un = 1;
179
180
36.2k
  ctx.gp = gp;
181
36.2k
  ctx.up = up;
182
36.2k
  ctx.usize = usize;
183
184
  /* FIXME: Handle n == 2 differently, after the loop? */
185
656k
  while (n >= 2)
186
620k
    {
187
620k
      struct hgcd_matrix1 M;
188
620k
      mp_limb_t ah, al, bh, bl;
189
620k
      mp_limb_t mask;
190
191
620k
      mask = ap[n-1] | bp[n-1];
192
620k
      ASSERT (mask > 0);
193
194
620k
      if (mask & GMP_NUMB_HIGHBIT)
195
27.3k
  {
196
27.3k
    ah = ap[n-1]; al = ap[n-2];
197
27.3k
    bh = bp[n-1]; bl = bp[n-2];
198
27.3k
  }
199
593k
      else if (n == 2)
200
33.2k
  {
201
    /* We use the full inputs without truncation, so we can
202
       safely shift left. */
203
33.2k
    int shift;
204
205
33.2k
    count_leading_zeros (shift, mask);
206
33.2k
    ah = MPN_EXTRACT_NUMB (shift, ap[1], ap[0]);
207
33.2k
    al = ap[0] << shift;
208
33.2k
    bh = MPN_EXTRACT_NUMB (shift, bp[1], bp[0]);
209
33.2k
    bl = bp[0] << shift;
210
33.2k
  }
211
559k
      else
212
559k
  {
213
559k
    int shift;
214
215
559k
    count_leading_zeros (shift, mask);
216
559k
    ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
217
559k
    al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
218
559k
    bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
219
559k
    bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
220
559k
  }
221
222
      /* Try an mpn_hgcd2 step */
223
620k
      if (mpn_hgcd2 (ah, al, bh, bl, &M))
224
612k
  {
225
612k
    n = mpn_matrix22_mul1_inverse_vector (&M, tp, ap, bp, n);
226
612k
    MP_PTR_SWAP (ap, tp);
227
612k
    un = mpn_hgcd_mul_matrix1_vector(&M, u2, u0, u1, un);
228
612k
    MP_PTR_SWAP (u0, u2);
229
612k
  }
230
8.10k
      else
231
8.10k
  {
232
    /* mpn_hgcd2 has failed. Then either one of a or b is very
233
       small, or the difference is very small. Perform one
234
       subtraction followed by one division. */
235
8.10k
    ctx.u0 = u0;
236
8.10k
    ctx.u1 = u1;
237
8.10k
    ctx.tp = u2;
238
8.10k
    ctx.un = un;
239
240
    /* Temporary storage n for the quotient and ualloc for the
241
       new cofactor. */
242
8.10k
    n = mpn_gcd_subdiv_step (ap, bp, n, 0, mpn_gcdext_hook, &ctx, tp);
243
8.10k
    if (n == 0)
244
375
      return ctx.gn;
245
246
7.72k
    un = ctx.un;
247
7.72k
  }
248
620k
    }
249
35.9k
  ASSERT_ALWAYS (ap[0] > 0);
250
35.9k
  ASSERT_ALWAYS (bp[0] > 0);
251
252
35.9k
  if (ap[0] == bp[0])
253
964
    {
254
964
      int c;
255
256
      /* Which cofactor to return now? Candidates are +u1 and -u0,
257
   depending on which of a and b was most recently reduced,
258
   which we don't keep track of. So compare and get the smallest
259
   one. */
260
261
964
      gp[0] = ap[0];
262
263
964
      MPN_CMP (c, u0, u1, un);
264
964
      ASSERT (c != 0 || (un == 1 && u0[0] == 1 && u1[0] == 1));
265
964
      if (c < 0)
266
494
  {
267
494
    MPN_NORMALIZE (u0, un);
268
494
    MPN_COPY (up, u0, un);
269
494
    *usize = -un;
270
494
  }
271
470
      else
272
470
  {
273
470
    MPN_NORMALIZE_NOT_ZERO (u1, un);
274
470
    MPN_COPY (up, u1, un);
275
470
    *usize = un;
276
470
  }
277
964
      return 1;
278
964
    }
279
34.9k
  else
280
34.9k
    {
281
34.9k
      mp_limb_t uh, vh;
282
34.9k
      mp_limb_signed_t u;
283
34.9k
      mp_limb_signed_t v;
284
34.9k
      int negate;
285
286
34.9k
      gp[0] = mpn_gcdext_1 (&u, &v, ap[0], bp[0]);
287
288
      /* Set up = u u1 - v u0. Keep track of size, un grows by one or
289
   two limbs. */
290
291
34.9k
      if (u == 0)
292
78
  {
293
78
    ASSERT (v == 1);
294
78
    MPN_NORMALIZE (u0, un);
295
78
    MPN_COPY (up, u0, un);
296
78
    *usize = -un;
297
78
    return 1;
298
78
  }
299
34.8k
      else if (v == 0)
300
351
  {
301
351
    ASSERT (u == 1);
302
351
    MPN_NORMALIZE (u1, un);
303
351
    MPN_COPY (up, u1, un);
304
351
    *usize = un;
305
351
    return 1;
306
351
  }
307
34.5k
      else if (u > 0)
308
27.9k
  {
309
27.9k
    negate = 0;
310
27.9k
    ASSERT (v < 0);
311
27.9k
    v = -v;
312
27.9k
  }
313
6.60k
      else
314
6.60k
  {
315
6.60k
    negate = 1;
316
6.60k
    ASSERT (v > 0);
317
6.60k
    u = -u;
318
6.60k
  }
319
320
34.5k
      uh = mpn_mul_1 (up, u1, un, u);
321
34.5k
      vh = mpn_addmul_1 (up, u0, un, v);
322
323
34.5k
      if ( (uh | vh) > 0)
324
4.04k
  {
325
4.04k
    uh += vh;
326
4.04k
    up[un++] = uh;
327
4.04k
    if (uh < vh)
328
0
      up[un++] = 1;
329
4.04k
  }
330
331
34.5k
      MPN_NORMALIZE_NOT_ZERO (up, un);
332
333
34.5k
      *usize = negate ? -un : un;
334
34.5k
      return 1;
335
34.9k
    }
336
35.9k
}