Coverage Report

Created: 2026-02-09 06:47

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/gmp/mpn/gcdext_lehmer.c
Line
Count
Source
1
/* mpn_gcdext -- Extended Greatest Common Divisor.
2
3
Copyright 1996, 1998, 2000-2005, 2008, 2009, 2012 Free Software Foundation,
4
Inc.
5
6
This file is part of the GNU MP Library.
7
8
The GNU MP Library is free software; you can redistribute it and/or modify
9
it under the terms of either:
10
11
  * the GNU Lesser General Public License as published by the Free
12
    Software Foundation; either version 3 of the License, or (at your
13
    option) any later version.
14
15
or
16
17
  * the GNU General Public License as published by the Free Software
18
    Foundation; either version 2 of the License, or (at your option) any
19
    later version.
20
21
or both in parallel, as here.
22
23
The GNU MP Library is distributed in the hope that it will be useful, but
24
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
25
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
26
for more details.
27
28
You should have received copies of the GNU General Public License and the
29
GNU Lesser General Public License along with the GNU MP Library.  If not,
30
see https://www.gnu.org/licenses/.  */
31
32
#include "gmp-impl.h"
33
#include "longlong.h"
34
35
/* Here, d is the index of the cofactor to update. FIXME: Could use qn
36
   = 0 for the common case q = 1. */
37
void
38
mpn_gcdext_hook (void *p, mp_srcptr gp, mp_size_t gn,
39
     mp_srcptr qp, mp_size_t qn, int d)
40
16.5k
{
41
16.5k
  struct gcdext_ctx *ctx = (struct gcdext_ctx *) p;
42
16.5k
  mp_size_t un = ctx->un;
43
44
16.5k
  if (gp)
45
384
    {
46
384
      mp_srcptr up;
47
48
384
      ASSERT (gn > 0);
49
384
      ASSERT (gp[gn-1] > 0);
50
51
384
      MPN_COPY (ctx->gp, gp, gn);
52
384
      ctx->gn = gn;
53
54
384
      if (d < 0)
55
166
  {
56
166
    int c;
57
58
    /* Must return the smallest cofactor, +u1 or -u0 */
59
166
    MPN_CMP (c, ctx->u0, ctx->u1, un);
60
166
    ASSERT (c != 0 || (un == 1 && ctx->u0[0] == 1 && ctx->u1[0] == 1));
61
62
166
    d = c < 0;
63
166
  }
64
65
384
      up = d ? ctx->u0 : ctx->u1;
66
67
384
      MPN_NORMALIZE (up, un);
68
384
      MPN_COPY (ctx->up, up, un);
69
70
384
      *ctx->usize = d ? -un : un;
71
384
    }
72
16.1k
  else
73
16.1k
    {
74
16.1k
      mp_limb_t cy;
75
16.1k
      mp_ptr u0 = ctx->u0;
76
16.1k
      mp_ptr u1 = ctx->u1;
77
78
16.1k
      ASSERT (d >= 0);
79
80
16.1k
      if (d)
81
7.53k
  MP_PTR_SWAP (u0, u1);
82
83
16.1k
      qn -= (qp[qn-1] == 0);
84
85
      /* Update u0 += q  * u1 */
86
16.1k
      if (qn == 1)
87
10.5k
  {
88
10.5k
    mp_limb_t q = qp[0];
89
90
10.5k
    if (q == 1)
91
      /* A common case. */
92
8.20k
      cy = mpn_add_n (u0, u0, u1, un);
93
2.36k
    else
94
2.36k
      cy = mpn_addmul_1 (u0, u1, un, q);
95
10.5k
  }
96
5.62k
      else
97
5.62k
  {
98
5.62k
    mp_size_t u1n;
99
5.62k
    mp_ptr tp;
100
101
5.62k
    u1n = un;
102
5.62k
    MPN_NORMALIZE (u1, u1n);
103
104
5.62k
    if (u1n == 0)
105
0
      return;
106
107
    /* Should always have u1n == un here, and u1 >= u0. The
108
       reason is that we alternate adding u0 to u1 and u1 to u0
109
       (corresponding to subtractions a - b and b - a), and we
110
       can get a large quotient only just after a switch, which
111
       means that we'll add (a multiple of) the larger u to the
112
       smaller. */
113
114
5.62k
    tp = ctx->tp;
115
116
5.62k
    if (qn > u1n)
117
1.58k
      mpn_mul (tp, qp, qn, u1, u1n);
118
4.04k
    else
119
4.04k
      mpn_mul (tp, u1, u1n, qp, qn);
120
121
5.62k
    u1n += qn;
122
5.62k
    u1n -= tp[u1n-1] == 0;
123
124
5.62k
    if (u1n >= un)
125
5.62k
      {
126
5.62k
        cy = mpn_add (u0, tp, u1n, u0, un);
127
5.62k
        un = u1n;
128
5.62k
      }
129
0
    else
130
      /* Note: Unlikely case, maybe never happens? */
131
0
      cy = mpn_add (u0, u0, un, tp, u1n);
132
133
5.62k
  }
134
16.1k
      u0[un] = cy;
135
16.1k
      ctx->un = un + (cy > 0);
136
16.1k
    }
137
16.5k
}
138
139
/* Temporary storage: 3*(n+1) for u. If hgcd2 succeeds, we need n for
140
   the matrix-vector multiplication adjusting a, b. If hgcd fails, we
141
   need at most n for the quotient and n+1 for the u update (reusing
142
   the extra u). In all, 4n + 3. */
143
144
mp_size_t
145
mpn_gcdext_lehmer_n (mp_ptr gp, mp_ptr up, mp_size_t *usize,
146
         mp_ptr ap, mp_ptr bp, mp_size_t n,
147
         mp_ptr tp)
148
35.0k
{
149
35.0k
  mp_size_t ualloc = n + 1;
150
151
  /* Keeps track of the second row of the reduction matrix
152
   *
153
   *   M = (v0, v1 ; u0, u1)
154
   *
155
   * which correspond to the first column of the inverse
156
   *
157
   *   M^{-1} = (u1, -v1; -u0, v0)
158
   *
159
   * This implies that
160
   *
161
   *   a =  u1 A (mod B)
162
   *   b = -u0 A (mod B)
163
   *
164
   * where A, B denotes the input values.
165
   */
166
167
35.0k
  struct gcdext_ctx ctx;
168
35.0k
  mp_size_t un;
169
35.0k
  mp_ptr u0;
170
35.0k
  mp_ptr u1;
171
35.0k
  mp_ptr u2;
172
173
35.0k
  MPN_ZERO (tp, 3*ualloc);
174
35.0k
  u0 = tp; tp += ualloc;
175
35.0k
  u1 = tp; tp += ualloc;
176
35.0k
  u2 = tp; tp += ualloc;
177
178
35.0k
  u1[0] = 1; un = 1;
179
180
35.0k
  ctx.gp = gp;
181
35.0k
  ctx.up = up;
182
35.0k
  ctx.usize = usize;
183
184
  /* FIXME: Handle n == 2 differently, after the loop? */
185
626k
  while (n >= 2)
186
591k
    {
187
591k
      struct hgcd_matrix1 M;
188
591k
      mp_limb_t ah, al, bh, bl;
189
591k
      mp_limb_t mask;
190
191
591k
      mask = ap[n-1] | bp[n-1];
192
591k
      ASSERT (mask > 0);
193
194
591k
      if (mask & GMP_NUMB_HIGHBIT)
195
25.6k
  {
196
25.6k
    ah = ap[n-1]; al = ap[n-2];
197
25.6k
    bh = bp[n-1]; bl = bp[n-2];
198
25.6k
  }
199
565k
      else if (n == 2)
200
31.8k
  {
201
    /* We use the full inputs without truncation, so we can
202
       safely shift left. */
203
31.8k
    int shift;
204
205
31.8k
    count_leading_zeros (shift, mask);
206
31.8k
    ah = MPN_EXTRACT_NUMB (shift, ap[1], ap[0]);
207
31.8k
    al = ap[0] << shift;
208
31.8k
    bh = MPN_EXTRACT_NUMB (shift, bp[1], bp[0]);
209
31.8k
    bl = bp[0] << shift;
210
31.8k
  }
211
534k
      else
212
534k
  {
213
534k
    int shift;
214
215
534k
    count_leading_zeros (shift, mask);
216
534k
    ah = MPN_EXTRACT_NUMB (shift, ap[n-1], ap[n-2]);
217
534k
    al = MPN_EXTRACT_NUMB (shift, ap[n-2], ap[n-3]);
218
534k
    bh = MPN_EXTRACT_NUMB (shift, bp[n-1], bp[n-2]);
219
534k
    bl = MPN_EXTRACT_NUMB (shift, bp[n-2], bp[n-3]);
220
534k
  }
221
222
      /* Try an mpn_hgcd2 step */
223
591k
      if (mpn_hgcd2 (ah, al, bh, bl, &M))
224
583k
  {
225
583k
    n = mpn_matrix22_mul1_inverse_vector (&M, tp, ap, bp, n);
226
583k
    MP_PTR_SWAP (ap, tp);
227
583k
    un = mpn_hgcd_mul_matrix1_vector(&M, u2, u0, u1, un);
228
583k
    MP_PTR_SWAP (u0, u2);
229
583k
  }
230
8.37k
      else
231
8.37k
  {
232
    /* mpn_hgcd2 has failed. Then either one of a or b is very
233
       small, or the difference is very small. Perform one
234
       subtraction followed by one division. */
235
8.37k
    ctx.u0 = u0;
236
8.37k
    ctx.u1 = u1;
237
8.37k
    ctx.tp = u2;
238
8.37k
    ctx.un = un;
239
240
    /* Temporary storage n for the quotient and ualloc for the
241
       new cofactor. */
242
8.37k
    n = mpn_gcd_subdiv_step (ap, bp, n, 0, mpn_gcdext_hook, &ctx, tp);
243
8.37k
    if (n == 0)
244
384
      return ctx.gn;
245
246
7.98k
    un = ctx.un;
247
7.98k
  }
248
591k
    }
249
34.7k
  ASSERT_ALWAYS (ap[0] > 0);
250
34.7k
  ASSERT_ALWAYS (bp[0] > 0);
251
252
34.7k
  if (ap[0] == bp[0])
253
1.04k
    {
254
1.04k
      int c;
255
256
      /* Which cofactor to return now? Candidates are +u1 and -u0,
257
   depending on which of a and b was most recently reduced,
258
   which we don't keep track of. So compare and get the smallest
259
   one. */
260
261
1.04k
      gp[0] = ap[0];
262
263
1.04k
      MPN_CMP (c, u0, u1, un);
264
1.04k
      ASSERT (c != 0 || (un == 1 && u0[0] == 1 && u1[0] == 1));
265
1.04k
      if (c < 0)
266
535
  {
267
535
    MPN_NORMALIZE (u0, un);
268
535
    MPN_COPY (up, u0, un);
269
535
    *usize = -un;
270
535
  }
271
505
      else
272
505
  {
273
505
    MPN_NORMALIZE_NOT_ZERO (u1, un);
274
505
    MPN_COPY (up, u1, un);
275
505
    *usize = un;
276
505
  }
277
1.04k
      return 1;
278
1.04k
    }
279
33.6k
  else
280
33.6k
    {
281
33.6k
      mp_limb_t uh, vh;
282
33.6k
      mp_limb_signed_t u;
283
33.6k
      mp_limb_signed_t v;
284
33.6k
      int negate;
285
286
33.6k
      gp[0] = mpn_gcdext_1 (&u, &v, ap[0], bp[0]);
287
288
      /* Set up = u u1 - v u0. Keep track of size, un grows by one or
289
   two limbs. */
290
291
33.6k
      if (u == 0)
292
92
  {
293
92
    ASSERT (v == 1);
294
92
    MPN_NORMALIZE (u0, un);
295
92
    MPN_COPY (up, u0, un);
296
92
    *usize = -un;
297
92
    return 1;
298
92
  }
299
33.5k
      else if (v == 0)
300
452
  {
301
452
    ASSERT (u == 1);
302
452
    MPN_NORMALIZE (u1, un);
303
452
    MPN_COPY (up, u1, un);
304
452
    *usize = un;
305
452
    return 1;
306
452
  }
307
33.1k
      else if (u > 0)
308
26.1k
  {
309
26.1k
    negate = 0;
310
26.1k
    ASSERT (v < 0);
311
26.1k
    v = -v;
312
26.1k
  }
313
6.97k
      else
314
6.97k
  {
315
6.97k
    negate = 1;
316
6.97k
    ASSERT (v > 0);
317
6.97k
    u = -u;
318
6.97k
  }
319
320
33.1k
      uh = mpn_mul_1 (up, u1, un, u);
321
33.1k
      vh = mpn_addmul_1 (up, u0, un, v);
322
323
33.1k
      if ( (uh | vh) > 0)
324
4.16k
  {
325
4.16k
    uh += vh;
326
4.16k
    up[un++] = uh;
327
4.16k
    if (uh < vh)
328
0
      up[un++] = 1;
329
4.16k
  }
330
331
33.1k
      MPN_NORMALIZE_NOT_ZERO (up, un);
332
333
33.1k
      *usize = negate ? -un : un;
334
33.1k
      return 1;
335
33.6k
    }
336
34.7k
}