Coverage Report

Created: 2023-09-25 06:34

/src/nettle-with-mini-gmp/mini-gmp.c
Line
Count
Source
1
/* mini-gmp, a minimalistic implementation of a GNU GMP subset.
2
3
   Contributed to the GNU project by Niels Möller
4
5
Copyright 1991-1997, 1999-2020 Free Software Foundation, Inc.
6
7
This file is part of the GNU MP Library.
8
9
The GNU MP Library is free software; you can redistribute it and/or modify
10
it under the terms of either:
11
12
  * the GNU Lesser General Public License as published by the Free
13
    Software Foundation; either version 3 of the License, or (at your
14
    option) any later version.
15
16
or
17
18
  * the GNU General Public License as published by the Free Software
19
    Foundation; either version 2 of the License, or (at your option) any
20
    later version.
21
22
or both in parallel, as here.
23
24
The GNU MP Library is distributed in the hope that it will be useful, but
25
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
27
for more details.
28
29
You should have received copies of the GNU General Public License and the
30
GNU Lesser General Public License along with the GNU MP Library.  If not,
31
see https://www.gnu.org/licenses/.  */
32
33
/* NOTE: All functions in this file which are not declared in
34
   mini-gmp.h are internal, and are not intended to be compatible
35
   either with GMP or with future versions of mini-gmp. */
36
37
/* Much of the material copied from GMP files, including: gmp-impl.h,
38
   longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39
   mpn/generic/lshift.c, mpn/generic/mul_1.c,
40
   mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41
   mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42
   mpn/generic/submul_1.c. */
43
44
#include <assert.h>
45
#include <ctype.h>
46
#include <limits.h>
47
#include <stdio.h>
48
#include <stdlib.h>
49
#include <string.h>
50
51
#include "mini-gmp.h"
52
53
#if !defined(MINI_GMP_DONT_USE_FLOAT_H)
54
#include <float.h>
55
#endif
56
57

58
/* Macros */
59
2.81G
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
60
61
56.7k
#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
62
53.4k
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
63
64
778M
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
65
761M
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
66
67
253M
#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
68
0
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))
69
70
62.3k
#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
71
0
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
72
73
0
#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
74
16.0k
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
75
76
8.58k
#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
77
78
#if defined(DBL_MANT_DIG) && FLT_RADIX == 2
79
0
#define GMP_DBL_MANT_BITS DBL_MANT_DIG
80
#else
81
#define GMP_DBL_MANT_BITS (53)
82
#endif
83
84
/* Return non-zero if xp,xsize and yp,ysize overlap.
85
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
86
   overlap.  If both these are false, there's an overlap. */
87
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize)       \
88
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
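The overlap macro above is a pure interval test on limb pointers. A minimal standalone sketch of the same check (the function name and plain arrays are illustrative, not part of mini-gmp):

#include <stddef.h>
#include <stdio.h>

/* Same test as GMP_MPN_OVERLAP_P: the ranges [xp, xp+xsize) and
   [yp, yp+ysize) overlap iff each range starts before the other ends. */
static int ranges_overlap (const unsigned long *xp, size_t xsize,
                           const unsigned long *yp, size_t ysize)
{
  return xp + xsize > yp && yp + ysize > xp;
}

int main (void)
{
  unsigned long buf[8];
  printf ("%d\n", ranges_overlap (buf, 4, buf + 4, 4));  /* 0: disjoint halves */
  printf ("%d\n", ranges_overlap (buf, 5, buf + 4, 4));  /* 1: one limb shared */
  return 0;
}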
89
90
4.45k
#define gmp_assert_nocarry(x) do { \
91
4.45k
    mp_limb_t __cy = (x);    \
92
4.45k
    assert (__cy == 0);      \
93
4.45k
  } while (0)
94
95
16.2k
#define gmp_clz(count, x) do {           \
96
16.2k
    mp_limb_t __clz_x = (x);            \
97
16.2k
    unsigned __clz_c = 0;           \
98
16.2k
    int LOCAL_SHIFT_BITS = 8;           \
99
16.2k
    if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)       \
100
16.2k
      for (;                \
101
76.5k
     (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
102
60.2k
     __clz_c += 8)           \
103
60.2k
  { __clz_x <<= LOCAL_SHIFT_BITS; }        \
104
53.4k
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++)   \
105
37.1k
      __clz_x <<= 1;             \
106
16.2k
    (count) = __clz_c;              \
107
16.2k
  } while (0)
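gmp_clz counts leading zero bits in two phases: whole bytes first, then single bits. A small sketch of the same loop, assuming a 64-bit limb (the helper name clz64 is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Two-phase leading-zero count as in gmp_clz; x must be non-zero,
   just as the macro's callers guarantee. */
static unsigned clz64 (uint64_t x)
{
  unsigned c = 0;
  while ((x & ((uint64_t) 0xff << 56)) == 0)   /* skip zero bytes */
    { x <<= 8; c += 8; }
  while ((x & ((uint64_t) 1 << 63)) == 0)      /* then single bits */
    { x <<= 1; c++; }
  return c;
}

int main (void)
{
  printf ("%u\n", clz64 (1));                  /* 63 */
  printf ("%u\n", clz64 ((uint64_t) 1 << 40)); /* 23 */
  return 0;
}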
108
109
0
#define gmp_ctz(count, x) do {           \
110
0
    mp_limb_t __ctz_x = (x);            \
111
0
    unsigned __ctz_c = 0;           \
112
0
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x);        \
113
0
    (count) = GMP_LIMB_BITS - 1 - __ctz_c;       \
114
0
  } while (0)
115
116
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
117
1.64M
  do {                 \
118
1.64M
    mp_limb_t __x;              \
119
1.64M
    __x = (al) + (bl);              \
120
1.64M
    (sh) = (ah) + (bh) + (__x < (al));          \
121
1.64M
    (sl) = __x;               \
122
1.64M
  } while (0)
123
124
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
125
22.8k
  do {                 \
126
22.8k
    mp_limb_t __x;              \
127
22.8k
    __x = (al) - (bl);              \
128
22.8k
    (sh) = (ah) - (bh) - ((al) < (bl));         \
129
22.8k
    (sl) = __x;               \
130
22.8k
  } while (0)
131
132
#define gmp_umul_ppmm(w1, w0, u, v)         \
133
253M
  do {                 \
134
253M
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;       \
135
253M
    if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS)   \
136
253M
      {                 \
137
0
  unsigned int __ww = (unsigned int) (u) * (v);     \
138
0
  w0 = (mp_limb_t) __ww;            \
139
0
  w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);     \
140
0
      }                  \
141
253M
    else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS)     \
142
253M
      {                 \
143
0
  unsigned long int __ww = (unsigned long int) (u) * (v);   \
144
0
  w0 = (mp_limb_t) __ww;            \
145
0
  w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);     \
146
0
      }                  \
147
253M
    else {               \
148
253M
      mp_limb_t __x0, __x1, __x2, __x3;         \
149
253M
      unsigned __ul, __vl, __uh, __vh;          \
150
253M
      mp_limb_t __u = (u), __v = (v);         \
151
253M
                  \
152
253M
      __ul = __u & GMP_LLIMB_MASK;         \
153
253M
      __uh = __u >> (GMP_LIMB_BITS / 2);        \
154
253M
      __vl = __v & GMP_LLIMB_MASK;         \
155
253M
      __vh = __v >> (GMP_LIMB_BITS / 2);        \
156
253M
                  \
157
253M
      __x0 = (mp_limb_t) __ul * __vl;         \
158
253M
      __x1 = (mp_limb_t) __ul * __vh;         \
159
253M
      __x2 = (mp_limb_t) __uh * __vl;         \
160
253M
      __x3 = (mp_limb_t) __uh * __vh;         \
161
253M
                  \
162
253M
      __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
163
253M
      __x1 += __x2;   /* but this indeed can */   \
164
253M
      if (__x1 < __x2)   /* did we get it? */      \
165
253M
  __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */  \
166
253M
                  \
167
253M
      (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2));     \
168
253M
      (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK);  \
169
253M
    }                 \
170
253M
  } while (0)
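The fallback branch of gmp_umul_ppmm assembles a double-limb product from four half-limb products and folds the one possible carry into the high word. A self-contained sketch of the same schoolbook scheme for a 64x64 -> 128-bit multiply using 32-bit halves (the name umul128 and the fixed width are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

/* Schoolbook 64x64 -> 128-bit multiply from four 32x32 -> 64 products,
   mirroring the generic branch of gmp_umul_ppmm. */
static void umul128 (uint64_t u, uint64_t v, uint64_t *hi, uint64_t *lo)
{
  uint64_t ul = u & 0xffffffffu, uh = u >> 32;
  uint64_t vl = v & 0xffffffffu, vh = v >> 32;

  uint64_t x0 = ul * vl, x1 = ul * vh, x2 = uh * vl, x3 = uh * vh;

  x1 += x0 >> 32;                 /* cannot carry */
  x1 += x2;                       /* but this can */
  if (x1 < x2)                    /* carry goes into the high product */
    x3 += (uint64_t) 1 << 32;

  *hi = x3 + (x1 >> 32);
  *lo = (x1 << 32) + (x0 & 0xffffffffu);
}

int main (void)
{
  uint64_t hi, lo;
  umul128 (UINT64_MAX, UINT64_MAX, &hi, &lo);
  printf ("%llx %llx\n", (unsigned long long) hi,
          (unsigned long long) lo);            /* fffffffffffffffe 1 */
  return 0;
}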
171
172
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di)      \
173
1.61M
  do {                 \
174
1.61M
    mp_limb_t _qh, _ql, _r, _mask;          \
175
1.61M
    gmp_umul_ppmm (_qh, _ql, (nh), (di));        \
176
1.61M
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));    \
177
1.61M
    _r = (nl) - _qh * (d);            \
178
1.61M
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */   \
179
1.61M
    _qh += _mask;             \
180
1.61M
    _r += _mask & (d);              \
181
1.61M
    if (_r >= (d))             \
182
1.61M
      {                 \
183
36
  _r -= (d);              \
184
36
  _qh++;                \
185
36
      }                  \
186
1.61M
                  \
187
1.61M
    (r) = _r;               \
188
1.61M
    (q) = _qh;                \
189
1.61M
  } while (0)
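gmp_udiv_qrnnd_preinv divides a two-limb numerator by a normalized one-limb divisor using a precomputed inverse di = floor((B^2-1)/d) - B, so the per-limb division loops further down can avoid a hardware divide. A toy sketch with 16-bit "limbs" (B = 2^16) so every intermediate fits in uint32_t; the inverse is computed from its definition rather than via mpn_invert_limb, and all names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* gmp_udiv_qrnnd_preinv with 16-bit limbs: d must be normalized
   (top bit set) and nh < d so the quotient fits in one limb. */
static void udiv_qrnnd_preinv16 (uint16_t *q, uint16_t *r,
                                 uint16_t nh, uint16_t nl, uint16_t d)
{
  uint16_t di = (uint16_t) (0xffffffffu / d - 0x10000u); /* floor((B^2-1)/d) - B */

  uint32_t qq = (uint32_t) nh * di;               /* <qh,ql>  = nh * di     */
  qq += ((uint32_t) (nh + 1) << 16) + nl;         /*          += <nh+1, nl> */
  uint16_t qh = (uint16_t) (qq >> 16);
  uint16_t ql = (uint16_t) qq;

  uint16_t rr = (uint16_t) (nl - (uint32_t) qh * d);  /* candidate remainder */
  if (rr > ql)                                        /* one adjustment down */
    { qh--; rr += d; }
  if (rr >= d)                                        /* and at most one up  */
    { rr -= d; qh++; }

  *q = qh;
  *r = rr;
}

int main (void)
{
  uint16_t q, r, d = 0x8003;              /* normalized divisor */
  uint32_t n = 0x7ffe1234u;               /* nh = 0x7ffe < d    */
  udiv_qrnnd_preinv16 (&q, &r, (uint16_t) (n >> 16), (uint16_t) n, d);
  assert (q == n / d && r == n % d);
  printf ("q=%u r=%u\n", q, r);           /* q=65526 r=4690 */
  return 0;
}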
190
191
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)   \
192
11.4k
  do {                 \
193
11.4k
    mp_limb_t _q0, _t1, _t0, _mask;         \
194
11.4k
    gmp_umul_ppmm ((q), _q0, (n2), (dinv));        \
195
11.4k
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));      \
196
11.4k
                  \
197
11.4k
    /* Compute the two most significant limbs of n - q'd */   \
198
11.4k
    (r1) = (n1) - (d1) * (q);           \
199
11.4k
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0));    \
200
11.4k
    gmp_umul_ppmm (_t1, _t0, (d0), (q));       \
201
11.4k
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);      \
202
11.4k
    (q)++;                \
203
11.4k
                  \
204
11.4k
    /* Conditionally adjust q and the remainders */     \
205
11.4k
    _mask = - (mp_limb_t) ((r1) >= _q0);        \
206
11.4k
    (q) += _mask;             \
207
11.4k
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
208
11.4k
    if ((r1) >= (d1))             \
209
11.4k
      {                 \
210
69
  if ((r1) > (d1) || (r0) >= (d0))       \
211
69
    {               \
212
0
      (q)++;              \
213
0
      gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));  \
214
0
    }                \
215
69
      }                  \
216
11.4k
  } while (0)
217
218
/* Swap macros. */
219
#define MP_LIMB_T_SWAP(x, y)            \
220
0
  do {                 \
221
0
    mp_limb_t __mp_limb_t_swap__tmp = (x);        \
222
0
    (x) = (y);                \
223
0
    (y) = __mp_limb_t_swap__tmp;          \
224
0
  } while (0)
225
#define MP_SIZE_T_SWAP(x, y)            \
226
9.35k
  do {                 \
227
9.35k
    mp_size_t __mp_size_t_swap__tmp = (x);        \
228
9.35k
    (x) = (y);                \
229
9.35k
    (y) = __mp_size_t_swap__tmp;          \
230
9.35k
  } while (0)
231
#define MP_BITCNT_T_SWAP(x,y)     \
232
0
  do {           \
233
0
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x);  \
234
0
    (x) = (y);          \
235
0
    (y) = __mp_bitcnt_t_swap__tmp;    \
236
0
  } while (0)
237
#define MP_PTR_SWAP(x, y)           \
238
4.45k
  do {                 \
239
4.45k
    mp_ptr __mp_ptr_swap__tmp = (x);          \
240
4.45k
    (x) = (y);                \
241
4.45k
    (y) = __mp_ptr_swap__tmp;           \
242
4.45k
  } while (0)
243
#define MP_SRCPTR_SWAP(x, y)            \
244
0
  do {                 \
245
0
    mp_srcptr __mp_srcptr_swap__tmp = (x);        \
246
0
    (x) = (y);                \
247
0
    (y) = __mp_srcptr_swap__tmp;          \
248
0
  } while (0)
249
250
#define MPN_PTR_SWAP(xp,xs, yp,ys)          \
251
  do {                  \
252
    MP_PTR_SWAP (xp, yp);           \
253
    MP_SIZE_T_SWAP (xs, ys);            \
254
  } while(0)
255
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys)         \
256
0
  do {                 \
257
0
    MP_SRCPTR_SWAP (xp, yp);            \
258
0
    MP_SIZE_T_SWAP (xs, ys);            \
259
0
  } while(0)
260
261
#define MPZ_PTR_SWAP(x, y)            \
262
0
  do {                 \
263
0
    mpz_ptr __mpz_ptr_swap__tmp = (x);          \
264
0
    (x) = (y);                \
265
0
    (y) = __mpz_ptr_swap__tmp;            \
266
0
  } while (0)
267
#define MPZ_SRCPTR_SWAP(x, y)           \
268
444
  do {                 \
269
444
    mpz_srcptr __mpz_srcptr_swap__tmp = (x);        \
270
444
    (x) = (y);                \
271
444
    (y) = __mpz_srcptr_swap__tmp;         \
272
444
  } while (0)
273
274
const int mp_bits_per_limb = GMP_LIMB_BITS;
275
276

277
/* Memory allocation and other helper functions. */
278
static void
279
gmp_die (const char *msg)
280
0
{
281
0
  fprintf (stderr, "%s\n", msg);
282
0
  abort();
283
0
}
284
285
static void *
286
gmp_default_alloc (size_t size)
287
42.7k
{
288
42.7k
  void *p;
289
290
42.7k
  assert (size > 0);
291
292
42.7k
  p = malloc (size);
293
42.7k
  if (!p)
294
0
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");
295
296
42.7k
  return p;
297
42.7k
}
298
299
static void *
300
gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
301
1.41k
{
302
1.41k
  void * p;
303
304
1.41k
  p = realloc (old, new_size);
305
306
1.41k
  if (!p)
307
0
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");
308
309
1.41k
  return p;
310
1.41k
}
311
312
static void
313
gmp_default_free (void *p, size_t unused_size)
314
39.0k
{
315
39.0k
  free (p);
316
39.0k
}
317
318
static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
319
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
320
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
321
322
void
323
mp_get_memory_functions (void *(**alloc_func) (size_t),
324
       void *(**realloc_func) (void *, size_t, size_t),
325
       void (**free_func) (void *, size_t))
326
12.3k
{
327
12.3k
  if (alloc_func)
328
6.15k
    *alloc_func = gmp_allocate_func;
329
330
12.3k
  if (realloc_func)
331
0
    *realloc_func = gmp_reallocate_func;
332
333
12.3k
  if (free_func)
334
6.15k
    *free_func = gmp_free_func;
335
12.3k
}
336
337
void
338
mp_set_memory_functions (void *(*alloc_func) (size_t),
339
       void *(*realloc_func) (void *, size_t, size_t),
340
       void (*free_func) (void *, size_t))
341
0
{
342
0
  if (!alloc_func)
343
0
    alloc_func = gmp_default_alloc;
344
0
  if (!realloc_func)
345
0
    realloc_func = gmp_default_realloc;
346
0
  if (!free_func)
347
0
    free_func = gmp_default_free;
348
349
0
  gmp_allocate_func = alloc_func;
350
0
  gmp_reallocate_func = realloc_func;
351
0
  gmp_free_func = free_func;
352
0
}
353
354
36.5k
#define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
355
32.8k
#define gmp_free(p) ((*gmp_free_func) ((p), 0))
356
357
static mp_ptr
358
gmp_xalloc_limbs (mp_size_t size)
359
27.0k
{
360
27.0k
  return (mp_ptr) gmp_xalloc (size * sizeof (mp_limb_t));
361
27.0k
}
362
363
static mp_ptr
364
gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
365
1.41k
{
366
1.41k
  assert (size > 0);
367
1.41k
  return (mp_ptr) (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
368
1.41k
}
369
370

371
/* MPN interface */
372
373
void
374
mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
375
20.1k
{
376
20.1k
  mp_size_t i;
377
137k
  for (i = 0; i < n; i++)
378
117k
    d[i] = s[i];
379
20.1k
}
380
381
void
382
mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
383
192
{
384
1.72k
  while (--n >= 0)
385
1.53k
    d[n] = s[n];
386
192
}
387
388
int
389
mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
390
3.99k
{
391
7.33k
  while (--n >= 0)
392
6.76k
    {
393
6.76k
      if (ap[n] != bp[n])
394
3.42k
  return ap[n] > bp[n] ? 1 : -1;
395
6.76k
    }
396
571
  return 0;
397
3.99k
}
398
399
static int
400
mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
401
3.27k
{
402
3.27k
  if (an != bn)
403
3.02k
    return an < bn ? -1 : 1;
404
247
  else
405
247
    return mpn_cmp (ap, bp, an);
406
3.27k
}
407
408
static mp_size_t
409
mpn_normalized_size (mp_srcptr xp, mp_size_t n)
410
17.1k
{
411
23.7k
  while (n > 0 && xp[n-1] == 0)
412
6.62k
    --n;
413
17.1k
  return n;
414
17.1k
}
415
416
int
417
mpn_zero_p(mp_srcptr rp, mp_size_t n)
418
1.96k
{
419
1.96k
  return mpn_normalized_size (rp, n) == 0;
420
1.96k
}
421
422
void
423
mpn_zero (mp_ptr rp, mp_size_t n)
424
8.90k
{
425
86.1k
  while (--n >= 0)
426
77.2k
    rp[n] = 0;
427
8.90k
}
428
429
mp_limb_t
430
mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
431
20.5k
{
432
20.5k
  mp_size_t i;
433
434
20.5k
  assert (n > 0);
435
20.5k
  i = 0;
436
20.5k
  do
437
156k
    {
438
156k
      mp_limb_t r = ap[i] + b;
439
      /* Carry out */
440
156k
      b = (r < b);
441
156k
      rp[i] = r;
442
156k
    }
443
156k
  while (++i < n);
444
445
20.5k
  return b;
446
20.5k
}
447
448
mp_limb_t
449
mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
450
1.04M
{
451
1.04M
  mp_size_t i;
452
1.04M
  mp_limb_t cy;
453
454
7.38M
  for (i = 0, cy = 0; i < n; i++)
455
6.34M
    {
456
6.34M
      mp_limb_t a, b, r;
457
6.34M
      a = ap[i]; b = bp[i];
458
6.34M
      r = a + cy;
459
6.34M
      cy = (r < cy);
460
6.34M
      r += b;
461
6.34M
      cy += (r < b);
462
6.34M
      rp[i] = r;
463
6.34M
    }
464
1.04M
  return cy;
465
1.04M
}
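mpn_add_n recovers each carry from unsigned wraparound: after r = a + cy the sum wrapped iff r < cy, and likewise after r += b. The same idiom in a tiny standalone form (illustrative helper, fixed 64-bit words, not the mini-gmp API):

#include <stdint.h>
#include <stdio.h>

/* Add two little-endian 64-bit-word numbers; carries are detected by
   wraparound comparisons exactly as in mpn_add_n. */
static uint64_t add_words (uint64_t *r, const uint64_t *a,
                           const uint64_t *b, int n)
{
  uint64_t cy = 0;
  for (int i = 0; i < n; i++)
    {
      uint64_t t = a[i] + cy;
      cy = (t < cy);            /* carry from adding the old carry */
      t += b[i];
      cy += (t < b[i]);         /* carry from adding b[i] */
      r[i] = t;
    }
  return cy;
}

int main (void)
{
  uint64_t a[2] = { UINT64_MAX, 1 }, b[2] = { 1, 2 }, r[2];
  uint64_t cy = add_words (r, a, b, 2);
  printf ("%llu %llu %llu\n",              /* 0 4 0 */
          (unsigned long long) r[0], (unsigned long long) r[1],
          (unsigned long long) cy);
  return 0;
}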
466
467
mp_limb_t
468
mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
469
1.77k
{
470
1.77k
  mp_limb_t cy;
471
472
1.77k
  assert (an >= bn);
473
474
1.77k
  cy = mpn_add_n (rp, ap, bp, bn);
475
1.77k
  if (an > bn)
476
1.62k
    cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
477
1.77k
  return cy;
478
1.77k
}
479
480
mp_limb_t
481
mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
482
3.02k
{
483
3.02k
  mp_size_t i;
484
485
3.02k
  assert (n > 0);
486
487
3.02k
  i = 0;
488
3.02k
  do
489
18.5k
    {
490
18.5k
      mp_limb_t a = ap[i];
491
      /* Carry out */
492
18.5k
      mp_limb_t cy = a < b;
493
18.5k
      rp[i] = a - b;
494
18.5k
      b = cy;
495
18.5k
    }
496
18.5k
  while (++i < n);
497
498
3.02k
  return b;
499
3.02k
}
500
501
mp_limb_t
502
mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
503
2.24M
{
504
2.24M
  mp_size_t i;
505
2.24M
  mp_limb_t cy;
506
507
16.0M
  for (i = 0, cy = 0; i < n; i++)
508
13.7M
    {
509
13.7M
      mp_limb_t a, b;
510
13.7M
      a = ap[i]; b = bp[i];
511
13.7M
      b += cy;
512
13.7M
      cy = (b < cy);
513
13.7M
      cy += (a < b);
514
13.7M
      rp[i] = a - b;
515
13.7M
    }
516
2.24M
  return cy;
517
2.24M
}
518
519
mp_limb_t
520
mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
521
3.27k
{
522
3.27k
  mp_limb_t cy;
523
524
3.27k
  assert (an >= bn);
525
526
3.27k
  cy = mpn_sub_n (rp, ap, bp, bn);
527
3.27k
  if (an > bn)
528
3.02k
    cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
529
3.27k
  return cy;
530
3.27k
}
531
532
mp_limb_t
533
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
534
6.07M
{
535
6.07M
  mp_limb_t ul, cl, hpl, lpl;
536
537
6.07M
  assert (n >= 1);
538
539
6.07M
  cl = 0;
540
6.07M
  do
541
37.0M
    {
542
37.0M
      ul = *up++;
543
37.0M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
544
545
37.0M
      lpl += cl;
546
37.0M
      cl = (lpl < cl) + hpl;
547
548
37.0M
      *rp++ = lpl;
549
37.0M
    }
550
37.0M
  while (--n != 0);
551
552
6.07M
  return cl;
553
6.07M
}
554
555
mp_limb_t
556
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
557
28.8M
{
558
28.8M
  mp_limb_t ul, cl, hpl, lpl, rl;
559
560
28.8M
  assert (n >= 1);
561
562
28.8M
  cl = 0;
563
28.8M
  do
564
203M
    {
565
203M
      ul = *up++;
566
203M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
567
568
203M
      lpl += cl;
569
203M
      cl = (lpl < cl) + hpl;
570
571
203M
      rl = *rp;
572
203M
      lpl = rl + lpl;
573
203M
      cl += lpl < rl;
574
203M
      *rp++ = lpl;
575
203M
    }
576
203M
  while (--n != 0);
577
578
28.8M
  return cl;
579
28.8M
}
580
581
mp_limb_t
582
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
583
1.82M
{
584
1.82M
  mp_limb_t ul, cl, hpl, lpl, rl;
585
586
1.82M
  assert (n >= 1);
587
588
1.82M
  cl = 0;
589
1.82M
  do
590
11.2M
    {
591
11.2M
      ul = *up++;
592
11.2M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
593
594
11.2M
      lpl += cl;
595
11.2M
      cl = (lpl < cl) + hpl;
596
597
11.2M
      rl = *rp;
598
11.2M
      lpl = rl - lpl;
599
11.2M
      cl += lpl > rl;
600
11.2M
      *rp++ = lpl;
601
11.2M
    }
602
11.2M
  while (--n != 0);
603
604
1.82M
  return cl;
605
1.82M
}
606
607
mp_limb_t
608
mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
609
5.38M
{
610
5.38M
  assert (un >= vn);
611
5.38M
  assert (vn >= 1);
612
5.38M
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un));
613
5.38M
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn));
614
615
  /* We first multiply by the low order limb. This result can be
616
     stored, not added, to rp. We also avoid a loop for zeroing this
617
     way. */
618
619
5.38M
  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
620
621
  /* Now accumulate the product of up[] and the next higher limb from
622
     vp[]. */
623
624
32.6M
  while (--vn >= 1)
625
27.3M
    {
626
27.3M
      rp += 1, vp += 1;
627
27.3M
      rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
628
27.3M
    }
629
5.38M
  return rp[un];
630
5.38M
}
631
632
void
633
mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
634
2.48M
{
635
2.48M
  mpn_mul (rp, ap, n, bp, n);
636
2.48M
}
637
638
void
639
mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
640
2.88M
{
641
2.88M
  mpn_mul (rp, ap, n, ap, n);
642
2.88M
}
643
644
mp_limb_t
645
mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
646
349k
{
647
349k
  mp_limb_t high_limb, low_limb;
648
349k
  unsigned int tnc;
649
349k
  mp_limb_t retval;
650
651
349k
  assert (n >= 1);
652
349k
  assert (cnt >= 1);
653
349k
  assert (cnt < GMP_LIMB_BITS);
654
655
349k
  up += n;
656
349k
  rp += n;
657
658
349k
  tnc = GMP_LIMB_BITS - cnt;
659
349k
  low_limb = *--up;
660
349k
  retval = low_limb >> tnc;
661
349k
  high_limb = (low_limb << cnt);
662
663
1.21M
  while (--n != 0)
664
860k
    {
665
860k
      low_limb = *--up;
666
860k
      *--rp = high_limb | (low_limb >> tnc);
667
860k
      high_limb = (low_limb << cnt);
668
860k
    }
669
349k
  *--rp = high_limb;
670
671
349k
  return retval;
672
349k
}
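mpn_lshift walks from the most significant limb downwards, splicing each limb's shifted bits with those of its lower neighbour, and returns the bits shifted out at the top. A fixed-width sketch of the same splice (illustrative helper, 64-bit words, 0 < cnt < 64):

#include <stdint.h>
#include <stdio.h>

/* Left-shift a little-endian multi-word number by cnt bits (1..63),
   top down as in mpn_lshift; returns the bits shifted out at the top. */
static uint64_t lshift_words (uint64_t *rp, const uint64_t *up,
                              int n, unsigned cnt)
{
  unsigned tnc = 64 - cnt;
  uint64_t ret = up[n - 1] >> tnc;
  for (int i = n - 1; i > 0; i--)
    rp[i] = (up[i] << cnt) | (up[i - 1] >> tnc);
  rp[0] = up[0] << cnt;
  return ret;
}

int main (void)
{
  uint64_t u[2] = { 0x8000000000000001u, 0x1 }, r[2];
  uint64_t out = lshift_words (r, u, 2, 4);
  printf ("%llx %llx %llx\n", (unsigned long long) out,
          (unsigned long long) r[1], (unsigned long long) r[0]);
  /* prints: 0 18 10 */
  return 0;
}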
673
674
mp_limb_t
675
mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
676
2.29M
{
677
2.29M
  mp_limb_t high_limb, low_limb;
678
2.29M
  unsigned int tnc;
679
2.29M
  mp_limb_t retval;
680
681
2.29M
  assert (n >= 1);
682
2.29M
  assert (cnt >= 1);
683
2.29M
  assert (cnt < GMP_LIMB_BITS);
684
685
2.29M
  tnc = GMP_LIMB_BITS - cnt;
686
2.29M
  high_limb = *up++;
687
2.29M
  retval = (high_limb << tnc);
688
2.29M
  low_limb = high_limb >> cnt;
689
690
15.5M
  while (--n != 0)
691
13.2M
    {
692
13.2M
      high_limb = *up++;
693
13.2M
      *rp++ = low_limb | (high_limb << tnc);
694
13.2M
      low_limb = high_limb >> cnt;
695
13.2M
    }
696
2.29M
  *rp = low_limb;
697
698
2.29M
  return retval;
699
2.29M
}
700
701
static mp_bitcnt_t
702
mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
703
     mp_limb_t ux)
704
0
{
705
0
  unsigned cnt;
706
707
0
  assert (ux == 0 || ux == GMP_LIMB_MAX);
708
0
  assert (0 <= i && i <= un );
709
710
0
  while (limb == 0)
711
0
    {
712
0
      i++;
713
0
      if (i == un)
714
0
  return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
715
0
      limb = ux ^ up[i];
716
0
    }
717
0
  gmp_ctz (cnt, limb);
718
0
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
719
0
}
720
721
mp_bitcnt_t
722
mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
723
0
{
724
0
  mp_size_t i;
725
0
  i = bit / GMP_LIMB_BITS;
726
727
0
  return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
728
0
        i, ptr, i, 0);
729
0
}
730
731
mp_bitcnt_t
732
mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
733
0
{
734
0
  mp_size_t i;
735
0
  i = bit / GMP_LIMB_BITS;
736
737
0
  return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
738
0
        i, ptr, i, GMP_LIMB_MAX);
739
0
}
740
741
void
742
mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n)
743
0
{
744
0
  while (--n >= 0)
745
0
    *rp++ = ~ *up++;
746
0
}
747
748
mp_limb_t
749
mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n)
750
0
{
751
0
  while (*up == 0)
752
0
    {
753
0
      *rp = 0;
754
0
      if (!--n)
755
0
  return 0;
756
0
      ++up; ++rp;
757
0
    }
758
0
  *rp = - *up;
759
0
  mpn_com (++rp, ++up, --n);
760
0
  return 1;
761
0
}
762
763

764
/* MPN division interface. */
765
766
/* The 3/2 inverse is defined as
767
768
     m = floor( (B^3-1) / (B u1 + u0)) - B
769
*/
770
mp_limb_t
771
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
772
12.6k
{
773
12.6k
  mp_limb_t r, m;
774
775
12.6k
  {
776
12.6k
    mp_limb_t p, ql;
777
12.6k
    unsigned ul, uh, qh;
778
779
    /* For notation, let b denote the half-limb base, so that B = b^2.
780
       Split u1 = b uh + ul. */
781
12.6k
    ul = u1 & GMP_LLIMB_MASK;
782
12.6k
    uh = u1 >> (GMP_LIMB_BITS / 2);
783
784
    /* Approximation of the high half of quotient. Differs from the 2/1
785
       inverse of the half limb uh, since we have already subtracted
786
       u0. */
787
12.6k
    qh = (u1 ^ GMP_LIMB_MAX) / uh;
788
789
    /* Adjust to get a half-limb 3/2 inverse, i.e., we want
790
791
       qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u)
792
     = floor( (b (~u) + b-1) / u),
793
794
       and the remainder
795
796
       r = b (~u) + b-1 - qh (b uh + ul)
797
       = b (~u - qh uh) + b-1 - qh ul
798
799
       Subtraction of qh ul may underflow, which implies adjustments.
800
       But by normalization, 2 u >= B > qh ul, so we need to adjust by
801
       at most 2.
802
    */
803
804
12.6k
    r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
805
806
12.6k
    p = (mp_limb_t) qh * ul;
807
    /* Adjustment steps taken from udiv_qrnnd_c */
808
12.6k
    if (r < p)
809
3.58k
      {
810
3.58k
  qh--;
811
3.58k
  r += u1;
812
3.58k
  if (r >= u1) /* i.e. we didn't get carry when adding to r */
813
3.58k
    if (r < p)
814
0
      {
815
0
        qh--;
816
0
        r += u1;
817
0
      }
818
3.58k
      }
819
12.6k
    r -= p;
820
821
    /* Low half of the quotient is
822
823
       ql = floor ( (b r + b-1) / u1).
824
825
       This is a 3/2 division (on half-limbs), for which qh is a
826
       suitable inverse. */
827
828
12.6k
    p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
829
    /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
830
       work, it is essential that ql is a full mp_limb_t. */
831
12.6k
    ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
832
833
    /* By the 3/2 trick, we don't need the high half limb. */
834
12.6k
    r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
835
836
12.6k
    if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2))))
837
0
      {
838
0
  ql--;
839
0
  r += u1;
840
0
      }
841
12.6k
    m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
842
12.6k
    if (r >= u1)
843
0
      {
844
0
  m++;
845
0
  r -= u1;
846
0
      }
847
12.6k
  }
848
849
  /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
850
     3/2 inverse. */
851
12.6k
  if (u0 > 0)
852
1.37k
    {
853
1.37k
      mp_limb_t th, tl;
854
1.37k
      r = ~r;
855
1.37k
      r += u0;
856
1.37k
      if (r < u0)
857
1.37k
  {
858
1.37k
    m--;
859
1.37k
    if (r >= u1)
860
0
      {
861
0
        m--;
862
0
        r -= u1;
863
0
      }
864
1.37k
    r -= u1;
865
1.37k
  }
866
1.37k
      gmp_umul_ppmm (th, tl, u0, m);
867
1.37k
      r += th;
868
1.37k
      if (r < th)
869
0
  {
870
0
    m--;
871
0
    m -= ((r > u1) | ((r == u1) & (tl > u0)));
872
0
  }
873
1.37k
    }
874
875
12.6k
  return m;
876
12.6k
}
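The value computed above matches the definition given before the function, m = floor((B^3-1)/(B u1 + u0)) - B. With a toy 8-bit limb (B = 256) the definition can be evaluated directly in 32-bit arithmetic, which makes the boundary cases easy to see (standalone sketch, not the mini-gmp code path):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* 3/2 inverse by its definition, for 8-bit limbs: u1 must be
   normalized (top bit set), and the result then fits in one limb. */
static uint8_t invert_3by2_toy (uint8_t u1, uint8_t u0)
{
  uint32_t B = 256, u = B * u1 + u0;
  return (uint8_t) ((B * B * B - 1) / u - B);
}

int main (void)
{
  assert (invert_3by2_toy (0x80, 0x00) == 0xff);  /* smallest divisor, largest inverse */
  assert (invert_3by2_toy (0xff, 0xff) == 0x00);  /* largest divisor, inverse 0 */
  printf ("m = 0x%02x\n", invert_3by2_toy (0xc0, 0x00));  /* 0x55 */
  return 0;
}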
877
878
struct gmp_div_inverse
879
{
880
  /* Normalization shift count. */
881
  unsigned shift;
882
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
883
  mp_limb_t d1, d0;
884
  /* Inverse, for 2/1 or 3/2. */
885
  mp_limb_t di;
886
};
887
888
static void
889
mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
890
10.9k
{
891
10.9k
  unsigned shift;
892
893
10.9k
  assert (d > 0);
894
10.9k
  gmp_clz (shift, d);
895
10.9k
  inv->shift = shift;
896
10.9k
  inv->d1 = d << shift;
897
10.9k
  inv->di = mpn_invert_limb (inv->d1);
898
10.9k
}
899
900
static void
901
mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
902
         mp_limb_t d1, mp_limb_t d0)
903
0
{
904
0
  unsigned shift;
905
906
0
  assert (d1 > 0);
907
0
  gmp_clz (shift, d1);
908
0
  inv->shift = shift;
909
0
  if (shift > 0)
910
0
    {
911
0
      d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
912
0
      d0 <<= shift;
913
0
    }
914
0
  inv->d1 = d1;
915
0
  inv->d0 = d0;
916
0
  inv->di = mpn_invert_3by2 (d1, d0);
917
0
}
918
919
static void
920
mpn_div_qr_invert (struct gmp_div_inverse *inv,
921
       mp_srcptr dp, mp_size_t dn)
922
1.68k
{
923
1.68k
  assert (dn > 0);
924
925
1.68k
  if (dn == 1)
926
0
    mpn_div_qr_1_invert (inv, dp[0]);
927
1.68k
  else if (dn == 2)
928
0
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
929
1.68k
  else
930
1.68k
    {
931
1.68k
      unsigned shift;
932
1.68k
      mp_limb_t d1, d0;
933
934
1.68k
      d1 = dp[dn-1];
935
1.68k
      d0 = dp[dn-2];
936
1.68k
      assert (d1 > 0);
937
1.68k
      gmp_clz (shift, d1);
938
1.68k
      inv->shift = shift;
939
1.68k
      if (shift > 0)
940
594
  {
941
594
    d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
942
594
    d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
943
594
  }
944
1.68k
      inv->d1 = d1;
945
1.68k
      inv->d0 = d0;
946
1.68k
      inv->di = mpn_invert_3by2 (d1, d0);
947
1.68k
    }
948
1.68k
}
949
950
/* Not matching current public gmp interface, rather corresponding to
951
   the sbpi1_div_* functions. */
952
static mp_limb_t
953
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
954
         const struct gmp_div_inverse *inv)
955
365k
{
956
365k
  mp_limb_t d, di;
957
365k
  mp_limb_t r;
958
365k
  mp_ptr tp = NULL;
959
960
365k
  if (inv->shift > 0)
961
348k
    {
962
      /* Shift, reusing qp area if possible. In-place shift if qp == np. */
963
348k
      tp = qp ? qp : gmp_xalloc_limbs (nn);
964
348k
      r = mpn_lshift (tp, np, nn, inv->shift);
965
348k
      np = tp;
966
348k
    }
967
17.2k
  else
968
17.2k
    r = 0;
969
970
365k
  d = inv->d1;
971
365k
  di = inv->di;
972
1.63M
  while (--nn >= 0)
973
1.27M
    {
974
1.27M
      mp_limb_t q;
975
976
1.27M
      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
977
1.27M
      if (qp)
978
1.27M
  qp[nn] = q;
979
1.27M
    }
980
365k
  if ((inv->shift > 0) && (tp != qp))
981
0
    gmp_free (tp);
982
983
365k
  return r >> inv->shift;
984
365k
}
985
986
static void
987
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
988
         const struct gmp_div_inverse *inv)
989
0
{
990
0
  unsigned shift;
991
0
  mp_size_t i;
992
0
  mp_limb_t d1, d0, di, r1, r0;
993
994
0
  assert (nn >= 2);
995
0
  shift = inv->shift;
996
0
  d1 = inv->d1;
997
0
  d0 = inv->d0;
998
0
  di = inv->di;
999
1000
0
  if (shift > 0)
1001
0
    r1 = mpn_lshift (np, np, nn, shift);
1002
0
  else
1003
0
    r1 = 0;
1004
1005
0
  r0 = np[nn - 1];
1006
1007
0
  i = nn - 2;
1008
0
  do
1009
0
    {
1010
0
      mp_limb_t n0, q;
1011
0
      n0 = np[i];
1012
0
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);
1013
1014
0
      if (qp)
1015
0
  qp[i] = q;
1016
0
    }
1017
0
  while (--i >= 0);
1018
1019
0
  if (shift > 0)
1020
0
    {
1021
0
      assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0);
1022
0
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
1023
0
      r1 >>= shift;
1024
0
    }
1025
1026
0
  np[1] = r1;
1027
0
  np[0] = r0;
1028
0
}
1029
1030
static void
1031
mpn_div_qr_pi1 (mp_ptr qp,
1032
    mp_ptr np, mp_size_t nn, mp_limb_t n1,
1033
    mp_srcptr dp, mp_size_t dn,
1034
    mp_limb_t dinv)
1035
1.68k
{
1036
1.68k
  mp_size_t i;
1037
1038
1.68k
  mp_limb_t d1, d0;
1039
1.68k
  mp_limb_t cy, cy1;
1040
1.68k
  mp_limb_t q;
1041
1042
1.68k
  assert (dn > 2);
1043
1.68k
  assert (nn >= dn);
1044
1045
1.68k
  d1 = dp[dn - 1];
1046
1.68k
  d0 = dp[dn - 2];
1047
1048
1.68k
  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
1049
  /* Iteration variable is the index of the q limb.
1050
   *
1051
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1052
   * by            <d1,          d0,        dp[dn-3],  ..., dp[0] >
1053
   */
1054
1055
1.68k
  i = nn - dn;
1056
1.68k
  do
1057
11.6k
    {
1058
11.6k
      mp_limb_t n0 = np[dn-1+i];
1059
1060
11.6k
      if (n1 == d1 && n0 == d0)
1061
167
  {
1062
167
    q = GMP_LIMB_MAX;
1063
167
    mpn_submul_1 (np+i, dp, dn, q);
1064
167
    n1 = np[dn-1+i];  /* update n1, last loop's value will now be invalid */
1065
167
  }
1066
11.4k
      else
1067
11.4k
  {
1068
11.4k
    gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);
1069
1070
11.4k
    cy = mpn_submul_1 (np + i, dp, dn-2, q);
1071
1072
11.4k
    cy1 = n0 < cy;
1073
11.4k
    n0 = n0 - cy;
1074
11.4k
    cy = n1 < cy1;
1075
11.4k
    n1 = n1 - cy1;
1076
11.4k
    np[dn-2+i] = n0;
1077
1078
11.4k
    if (cy != 0)
1079
112
      {
1080
112
        n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
1081
112
        q--;
1082
112
      }
1083
11.4k
  }
1084
1085
11.6k
      if (qp)
1086
0
  qp[i] = q;
1087
11.6k
    }
1088
11.6k
  while (--i >= 0);
1089
1090
1.68k
  np[dn - 1] = n1;
1091
1.68k
}
1092
1093
static void
1094
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
1095
       mp_srcptr dp, mp_size_t dn,
1096
       const struct gmp_div_inverse *inv)
1097
1.68k
{
1098
1.68k
  assert (dn > 0);
1099
1.68k
  assert (nn >= dn);
1100
1101
1.68k
  if (dn == 1)
1102
0
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
1103
1.68k
  else if (dn == 2)
1104
0
    mpn_div_qr_2_preinv (qp, np, nn, inv);
1105
1.68k
  else
1106
1.68k
    {
1107
1.68k
      mp_limb_t nh;
1108
1.68k
      unsigned shift;
1109
1110
1.68k
      assert (inv->d1 == dp[dn-1]);
1111
1.68k
      assert (inv->d0 == dp[dn-2]);
1112
1.68k
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);
1113
1114
1.68k
      shift = inv->shift;
1115
1.68k
      if (shift > 0)
1116
594
  nh = mpn_lshift (np, np, nn, shift);
1117
1.08k
      else
1118
1.08k
  nh = 0;
1119
1120
1.68k
      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);
1121
1122
1.68k
      if (shift > 0)
1123
594
  gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
1124
1.68k
    }
1125
1.68k
}
1126
1127
static void
1128
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
1129
1.68k
{
1130
1.68k
  struct gmp_div_inverse inv;
1131
1.68k
  mp_ptr tp = NULL;
1132
1133
1.68k
  assert (dn > 0);
1134
1.68k
  assert (nn >= dn);
1135
1136
1.68k
  mpn_div_qr_invert (&inv, dp, dn);
1137
1.68k
  if (dn > 2 && inv.shift > 0)
1138
594
    {
1139
594
      tp = gmp_xalloc_limbs (dn);
1140
594
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
1141
594
      dp = tp;
1142
594
    }
1143
1.68k
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
1144
1.68k
  if (tp)
1145
594
    gmp_free (tp);
1146
1.68k
}
1147
1148

1149
/* MPN base conversion. */
1150
static unsigned
1151
mpn_base_power_of_two_p (unsigned b)
1152
9.50k
{
1153
9.50k
  switch (b)
1154
9.50k
    {
1155
0
    case 2: return 1;
1156
0
    case 4: return 2;
1157
529
    case 8: return 3;
1158
0
    case 16: return 4;
1159
0
    case 32: return 5;
1160
0
    case 64: return 6;
1161
0
    case 128: return 7;
1162
0
    case 256: return 8;
1163
8.97k
    default: return 0;
1164
9.50k
    }
1165
9.50k
}
1166
1167
struct mpn_base_info
1168
{
1169
  /* bb is the largest power of the base which fits in one limb, and
1170
     exp is the corresponding exponent. */
1171
  unsigned exp;
1172
  mp_limb_t bb;
1173
};
1174
1175
static void
1176
mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
1177
8.97k
{
1178
8.97k
  mp_limb_t m;
1179
8.97k
  mp_limb_t p;
1180
8.97k
  unsigned exp;
1181
1182
8.97k
  m = GMP_LIMB_MAX / b;
1183
170k
  for (exp = 1, p = b; p <= m; exp++)
1184
161k
    p *= b;
1185
1186
8.97k
  info->exp = exp;
1187
8.97k
  info->bb = p;
1188
8.97k
}
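mpn_get_base_info finds the largest power bb of the base that still fits in one limb, together with its exponent; the guard p <= m with m = GMP_LIMB_MAX / b stops the loop just before a multiply would overflow. A standalone sketch for base 10, assuming a 64-bit limb:

#include <stdint.h>
#include <stdio.h>

/* Largest power of b fitting in a 64-bit word, with its exponent,
   using the same overflow guard as mpn_get_base_info. */
int main (void)
{
  uint64_t b = 10, m = UINT64_MAX / b, p;
  unsigned exp;

  for (exp = 1, p = b; p <= m; exp++)
    p *= b;

  printf ("exp = %u, bb = %llu\n", exp, (unsigned long long) p);
  /* for base 10 and 64-bit limbs: exp = 19, bb = 10^19 */
  return 0;
}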
1189
1190
static mp_bitcnt_t
1191
mpn_limb_size_in_base_2 (mp_limb_t u)
1192
3.67k
{
1193
3.67k
  unsigned shift;
1194
1195
3.67k
  assert (u > 0);
1196
3.67k
  gmp_clz (shift, u);
1197
3.67k
  return GMP_LIMB_BITS - shift;
1198
3.67k
}
1199
1200
static size_t
1201
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
1202
0
{
1203
0
  unsigned char mask;
1204
0
  size_t sn, j;
1205
0
  mp_size_t i;
1206
0
  unsigned shift;
1207
1208
0
  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
1209
0
  + bits - 1) / bits;
1210
1211
0
  mask = (1U << bits) - 1;
1212
1213
0
  for (i = 0, j = sn, shift = 0; j-- > 0;)
1214
0
    {
1215
0
      unsigned char digit = up[i] >> shift;
1216
1217
0
      shift += bits;
1218
1219
0
      if (shift >= GMP_LIMB_BITS && ++i < un)
1220
0
  {
1221
0
    shift -= GMP_LIMB_BITS;
1222
0
    digit |= up[i] << (bits - shift);
1223
0
  }
1224
0
      sp[j] = digit & mask;
1225
0
    }
1226
0
  return sn;
1227
0
}
1228
1229
/* We generate digits from the least significant end, and reverse at
1230
   the end. */
1231
static size_t
1232
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
1233
      const struct gmp_div_inverse *binv)
1234
20.9k
{
1235
20.9k
  mp_size_t i;
1236
367k
  for (i = 0; w > 0; i++)
1237
346k
    {
1238
346k
      mp_limb_t h, l, r;
1239
1240
346k
      h = w >> (GMP_LIMB_BITS - binv->shift);
1241
346k
      l = w << binv->shift;
1242
1243
346k
      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
1244
346k
      assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0);
1245
346k
      r >>= binv->shift;
1246
1247
346k
      sp[i] = r;
1248
346k
    }
1249
20.9k
  return i;
1250
20.9k
}
1251
1252
static size_t
1253
mpn_get_str_other (unsigned char *sp,
1254
       int base, const struct mpn_base_info *info,
1255
       mp_ptr up, mp_size_t un)
1256
3.67k
{
1257
3.67k
  struct gmp_div_inverse binv;
1258
3.67k
  size_t sn;
1259
3.67k
  size_t i;
1260
1261
3.67k
  mpn_div_qr_1_invert (&binv, base);
1262
1263
3.67k
  sn = 0;
1264
1265
3.67k
  if (un > 1)
1266
3.58k
    {
1267
3.58k
      struct gmp_div_inverse bbinv;
1268
3.58k
      mpn_div_qr_1_invert (&bbinv, info->bb);
1269
1270
3.58k
      do
1271
17.2k
  {
1272
17.2k
    mp_limb_t w;
1273
17.2k
    size_t done;
1274
17.2k
    w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
1275
17.2k
    un -= (up[un-1] == 0);
1276
17.2k
    done = mpn_limb_get_str (sp + sn, w, &binv);
1277
1278
19.3k
    for (sn += done; done < info->exp; done++)
1279
2.07k
      sp[sn++] = 0;
1280
17.2k
  }
1281
17.2k
      while (un > 1);
1282
3.58k
    }
1283
3.67k
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);
1284
1285
  /* Reverse order */
1286
177k
  for (i = 0; 2*i + 1 < sn; i++)
1287
173k
    {
1288
173k
      unsigned char t = sp[i];
1289
173k
      sp[i] = sp[sn - i - 1];
1290
173k
      sp[sn - i - 1] = t;
1291
173k
    }
1292
1293
3.67k
  return sn;
1294
3.67k
}
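As the comment before mpn_limb_get_str says, digits are generated least significant first and reversed at the end. A one-word sketch of that pattern using plain / and % instead of the preinverted division (the helper name is illustrative):

#include <stdio.h>
#include <stddef.h>

/* Emit base-b digits of w least-significant first, then reverse into
   printing order, as mpn_get_str_other does for full numbers. */
static size_t word_get_str (unsigned char *sp, unsigned long w, unsigned b)
{
  size_t n = 0, i;
  do { sp[n++] = w % b; w /= b; } while (w > 0);
  for (i = 0; 2 * i + 1 < n; i++)
    { unsigned char t = sp[i]; sp[i] = sp[n - 1 - i]; sp[n - 1 - i] = t; }
  return n;
}

int main (void)
{
  unsigned char d[32];
  size_t n = word_get_str (d, 1234567, 10);
  for (size_t i = 0; i < n; i++)
    putchar ('0' + d[i]);
  putchar ('\n');                      /* prints 1234567 */
  return 0;
}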
1295
1296
size_t
1297
mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
1298
0
{
1299
0
  unsigned bits;
1300
1301
0
  assert (un > 0);
1302
0
  assert (up[un-1] > 0);
1303
1304
0
  bits = mpn_base_power_of_two_p (base);
1305
0
  if (bits)
1306
0
    return mpn_get_str_bits (sp, bits, up, un);
1307
0
  else
1308
0
    {
1309
0
      struct mpn_base_info info;
1310
1311
0
      mpn_get_base_info (&info, base);
1312
0
      return mpn_get_str_other (sp, base, &info, up, un);
1313
0
    }
1314
0
}
1315
1316
static mp_size_t
1317
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
1318
      unsigned bits)
1319
529
{
1320
529
  mp_size_t rn;
1321
529
  size_t j;
1322
529
  unsigned shift;
1323
1324
1.05k
  for (j = sn, rn = 0, shift = 0; j-- > 0; )
1325
529
    {
1326
529
      if (shift == 0)
1327
529
  {
1328
529
    rp[rn++] = sp[j];
1329
529
    shift += bits;
1330
529
  }
1331
0
      else
1332
0
  {
1333
0
    rp[rn-1] |= (mp_limb_t) sp[j] << shift;
1334
0
    shift += bits;
1335
0
    if (shift >= GMP_LIMB_BITS)
1336
0
      {
1337
0
        shift -= GMP_LIMB_BITS;
1338
0
        if (shift > 0)
1339
0
    rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
1340
0
      }
1341
0
  }
1342
529
    }
1343
529
  rn = mpn_normalized_size (rp, rn);
1344
529
  return rn;
1345
529
}
1346
1347
/* Result is usually normalized, except for all-zero input, in which
1348
   case a single zero limb is written at *RP, and 1 is returned. */
1349
static mp_size_t
1350
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
1351
       mp_limb_t b, const struct mpn_base_info *info)
1352
5.30k
{
1353
5.30k
  mp_size_t rn;
1354
5.30k
  mp_limb_t w;
1355
5.30k
  unsigned k;
1356
5.30k
  size_t j;
1357
1358
5.30k
  assert (sn > 0);
1359
1360
5.30k
  k = 1 + (sn - 1) % info->exp;
1361
1362
5.30k
  j = 0;
1363
5.30k
  w = sp[j++];
1364
27.2k
  while (--k != 0)
1365
21.9k
    w = w * b + sp[j++];
1366
1367
5.30k
  rp[0] = w;
1368
1369
24.2k
  for (rn = 1; j < sn;)
1370
18.9k
    {
1371
18.9k
      mp_limb_t cy;
1372
1373
18.9k
      w = sp[j++];
1374
359k
      for (k = 1; k < info->exp; k++)
1375
340k
  w = w * b + sp[j++];
1376
1377
18.9k
      cy = mpn_mul_1 (rp, rp, rn, info->bb);
1378
18.9k
      cy += mpn_add_1 (rp, rp, rn, w);
1379
18.9k
      if (cy > 0)
1380
16.7k
  rp[rn++] = cy;
1381
18.9k
    }
1382
5.30k
  assert (j == sn);
1383
1384
5.30k
  return rn;
1385
5.30k
}
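mpn_set_str_other consumes the digit string in chunks of info->exp digits, folding each chunk into a single limb with Horner's rule; the first chunk takes 1 + (sn-1) % exp digits so that every later chunk is full. A small sketch of just that chunking arithmetic, with decimal digits and an assumed chunk size of 4:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Leading-chunk length as in mpn_set_str_other: with chunks of `exp`
   digits, the first chunk absorbs the remainder so all later chunks
   have exactly `exp` digits. */
int main (void)
{
  const char *s = "1234567";            /* 7 digits */
  size_t sn = strlen (s), j = 0;
  unsigned exp = 4;
  unsigned k = 1 + (sn - 1) % exp;      /* 3: leading chunk is "123" */

  uint64_t w = s[j++] - '0';
  while (--k != 0)
    w = w * 10 + (s[j++] - '0');        /* Horner within the chunk */

  printf ("first chunk = %llu, %zu digits left\n",
          (unsigned long long) w, sn - j);   /* first chunk = 123, 4 digits left */
  return 0;
}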
1386
1387
mp_size_t
1388
mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
1389
0
{
1390
0
  unsigned bits;
1391
1392
0
  if (sn == 0)
1393
0
    return 0;
1394
1395
0
  bits = mpn_base_power_of_two_p (base);
1396
0
  if (bits)
1397
0
    return mpn_set_str_bits (rp, sp, sn, bits);
1398
0
  else
1399
0
    {
1400
0
      struct mpn_base_info info;
1401
1402
0
      mpn_get_base_info (&info, base);
1403
0
      return mpn_set_str_other (rp, sp, sn, base, &info);
1404
0
    }
1405
0
}
1406
1407

1408
/* MPZ interface */
1409
void
1410
mpz_init (mpz_t r)
1411
22.4k
{
1412
22.4k
  static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0;
1413
1414
22.4k
  r->_mp_alloc = 0;
1415
22.4k
  r->_mp_size = 0;
1416
22.4k
  r->_mp_d = (mp_ptr) &dummy_limb;
1417
22.4k
}
1418
1419
/* The utility of this function is a bit limited, since many functions
1420
   assign the result variable using mpz_swap. */
1421
void
1422
mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1423
4.45k
{
1424
4.45k
  mp_size_t rn;
1425
1426
4.45k
  bits -= (bits != 0);    /* Round down, except if 0 */
1427
4.45k
  rn = 1 + bits / GMP_LIMB_BITS;
1428
1429
4.45k
  r->_mp_alloc = rn;
1430
4.45k
  r->_mp_size = 0;
1431
4.45k
  r->_mp_d = gmp_xalloc_limbs (rn);
1432
4.45k
}
1433
1434
void
1435
mpz_clear (mpz_t r)
1436
22.7k
{
1437
22.7k
  if (r->_mp_alloc)
1438
19.1k
    gmp_free (r->_mp_d);
1439
22.7k
}
1440
1441
static mp_ptr
1442
mpz_realloc (mpz_t r, mp_size_t size)
1443
16.0k
{
1444
16.0k
  size = GMP_MAX (size, 1);
1445
1446
16.0k
  if (r->_mp_alloc)
1447
1.41k
    r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
1448
14.6k
  else
1449
14.6k
    r->_mp_d = gmp_xalloc_limbs (size);
1450
16.0k
  r->_mp_alloc = size;
1451
1452
16.0k
  if (GMP_ABS (r->_mp_size) > size)
1453
0
    r->_mp_size = 0;
1454
1455
16.0k
  return r->_mp_d;
1456
16.0k
}
1457
1458
/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs.  */
1459
17.9k
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc      \
1460
17.9k
        ? mpz_realloc(z,n)      \
1461
17.9k
        : (z)->_mp_d)
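MPZ_REALLOC grows the limb buffer only when the requested count exceeds the current allocation and otherwise hands back the existing pointer. The same grow-only pattern in isolation (the struct and function are illustrative, not the mpz_t layout):

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

/* Grow-only buffer: reallocate only when n exceeds what is already
   allocated, mirroring MPZ_REALLOC's behaviour. */
struct buf { uint64_t *d; size_t alloc; };

static uint64_t *
buf_prepare (struct buf *b, size_t n)
{
  if (n > b->alloc)
    {
      b->d = realloc (b->d, n * sizeof *b->d);   /* error handling omitted */
      b->alloc = n;
    }
  return b->d;
}

int main (void)
{
  struct buf b = { NULL, 0 };
  buf_prepare (&b, 4)[0] = 42;      /* grows to 4 words */
  buf_prepare (&b, 2);              /* no-op: already big enough */
  printf ("%zu %llu\n", b.alloc, (unsigned long long) b.d[0]);  /* 4 42 */
  free (b.d);
  return 0;
}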
1462

1463
/* MPZ assignment and basic conversions. */
1464
void
1465
mpz_set_si (mpz_t r, signed long int x)
1466
0
{
1467
0
  if (x >= 0)
1468
0
    mpz_set_ui (r, x);
1469
0
  else /* (x < 0) */
1470
0
    if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1471
0
      {
1472
0
  mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x));
1473
0
  mpz_neg (r, r);
1474
0
      }
1475
0
  else
1476
0
    {
1477
0
      r->_mp_size = -1;
1478
0
      MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x);
1479
0
    }
1480
0
}
1481
1482
void
1483
mpz_set_ui (mpz_t r, unsigned long int x)
1484
1.68k
{
1485
1.68k
  if (x > 0)
1486
1.68k
    {
1487
1.68k
      r->_mp_size = 1;
1488
1.68k
      MPZ_REALLOC (r, 1)[0] = x;
1489
1.68k
      if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1490
0
  {
1491
0
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
1492
0
    while (x >>= LOCAL_GMP_LIMB_BITS)
1493
0
      {
1494
0
        ++ r->_mp_size;
1495
0
        MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x;
1496
0
      }
1497
0
  }
1498
1.68k
    }
1499
0
  else
1500
0
    r->_mp_size = 0;
1501
1.68k
}
1502
1503
void
1504
mpz_set (mpz_t r, const mpz_t x)
1505
5.04k
{
1506
  /* Allow the NOP r == x */
1507
5.04k
  if (r != x)
1508
1.68k
    {
1509
1.68k
      mp_size_t n;
1510
1.68k
      mp_ptr rp;
1511
1512
1.68k
      n = GMP_ABS (x->_mp_size);
1513
1.68k
      rp = MPZ_REALLOC (r, n);
1514
1515
1.68k
      mpn_copyi (rp, x->_mp_d, n);
1516
1.68k
      r->_mp_size = x->_mp_size;
1517
1.68k
    }
1518
5.04k
}
1519
1520
void
1521
mpz_init_set_si (mpz_t r, signed long int x)
1522
0
{
1523
0
  mpz_init (r);
1524
0
  mpz_set_si (r, x);
1525
0
}
1526
1527
void
1528
mpz_init_set_ui (mpz_t r, unsigned long int x)
1529
1.68k
{
1530
1.68k
  mpz_init (r);
1531
1.68k
  mpz_set_ui (r, x);
1532
1.68k
}
1533
1534
void
1535
mpz_init_set (mpz_t r, const mpz_t x)
1536
1.68k
{
1537
1.68k
  mpz_init (r);
1538
1.68k
  mpz_set (r, x);
1539
1.68k
}
1540
1541
int
1542
mpz_fits_slong_p (const mpz_t u)
1543
0
{
1544
0
  return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0;
1545
0
}
1546
1547
static int
1548
mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un)
1549
0
{
1550
0
  int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS;
1551
0
  mp_limb_t ulongrem = 0;
1552
1553
0
  if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0)
1554
0
    ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1;
1555
1556
0
  return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1);
1557
0
}
1558
1559
int
1560
mpz_fits_ulong_p (const mpz_t u)
1561
0
{
1562
0
  mp_size_t us = u->_mp_size;
1563
1564
0
  return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us);
1565
0
}
1566
1567
int
1568
mpz_fits_sint_p (const mpz_t u)
1569
0
{
1570
0
  return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0;
1571
0
}
1572
1573
int
1574
mpz_fits_uint_p (const mpz_t u)
1575
0
{
1576
0
  return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0;
1577
0
}
1578
1579
int
1580
mpz_fits_sshort_p (const mpz_t u)
1581
0
{
1582
0
  return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0;
1583
0
}
1584
1585
int
1586
mpz_fits_ushort_p (const mpz_t u)
1587
0
{
1588
0
  return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0;
1589
0
}
1590
1591
long int
1592
mpz_get_si (const mpz_t u)
1593
0
{
1594
0
  unsigned long r = mpz_get_ui (u);
1595
0
  unsigned long c = -LONG_MAX - LONG_MIN;
1596
1597
0
  if (u->_mp_size < 0)
1598
    /* This expression is necessary to properly handle -LONG_MIN */
1599
0
    return -(long) c - (long) ((r - c) & LONG_MAX);
1600
0
  else
1601
0
    return (long) (r & LONG_MAX);
1602
0
}
1603
1604
unsigned long int
1605
mpz_get_ui (const mpz_t u)
1606
0
{
1607
0
  if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1608
0
    {
1609
0
      int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
1610
0
      unsigned long r = 0;
1611
0
      mp_size_t n = GMP_ABS (u->_mp_size);
1612
0
      n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS);
1613
0
      while (--n >= 0)
1614
0
  r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n];
1615
0
      return r;
1616
0
    }
1617
1618
0
  return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1619
0
}
1620
1621
size_t
1622
mpz_size (const mpz_t u)
1623
5.15k
{
1624
5.15k
  return GMP_ABS (u->_mp_size);
1625
5.15k
}
1626
1627
mp_limb_t
1628
mpz_getlimbn (const mpz_t u, mp_size_t n)
1629
0
{
1630
0
  if (n >= 0 && n < GMP_ABS (u->_mp_size))
1631
0
    return u->_mp_d[n];
1632
0
  else
1633
0
    return 0;
1634
0
}
1635
1636
void
1637
mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
1638
0
{
1639
0
  mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
1640
0
}
1641
1642
mp_srcptr
1643
mpz_limbs_read (mpz_srcptr x)
1644
3.78k
{
1645
3.78k
  return x->_mp_d;
1646
3.78k
}
1647
1648
mp_ptr
1649
mpz_limbs_modify (mpz_t x, mp_size_t n)
1650
3.68k
{
1651
3.68k
  assert (n > 0);
1652
3.68k
  return MPZ_REALLOC (x, n);
1653
3.68k
}
1654
1655
mp_ptr
1656
mpz_limbs_write (mpz_t x, mp_size_t n)
1657
3.68k
{
1658
3.68k
  return mpz_limbs_modify (x, n);
1659
3.68k
}
1660
1661
void
1662
mpz_limbs_finish (mpz_t x, mp_size_t xs)
1663
9.71k
{
1664
9.71k
  mp_size_t xn;
1665
9.71k
  xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1666
9.71k
  x->_mp_size = xs < 0 ? -xn : xn;
1667
9.71k
}
1668
1669
static mpz_srcptr
1670
mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1671
6.02k
{
1672
6.02k
  x->_mp_alloc = 0;
1673
6.02k
  x->_mp_d = (mp_ptr) xp;
1674
6.02k
  x->_mp_size = xs;
1675
6.02k
  return x;
1676
6.02k
}
1677
1678
mpz_srcptr
1679
mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1680
6.02k
{
1681
6.02k
  mpz_roinit_normal_n (x, xp, xs);
1682
6.02k
  mpz_limbs_finish (x, xs);
1683
6.02k
  return x;
1684
6.02k
}
1685
1686

1687
/* Conversions and comparison to double. */
1688
void
1689
mpz_set_d (mpz_t r, double x)
1690
0
{
1691
0
  int sign;
1692
0
  mp_ptr rp;
1693
0
  mp_size_t rn, i;
1694
0
  double B;
1695
0
  double Bi;
1696
0
  mp_limb_t f;
1697
1698
  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1699
     zero or infinity. */
1700
0
  if (x != x || x == x * 0.5)
1701
0
    {
1702
0
      r->_mp_size = 0;
1703
0
      return;
1704
0
    }
1705
1706
0
  sign = x < 0.0 ;
1707
0
  if (sign)
1708
0
    x = - x;
1709
1710
0
  if (x < 1.0)
1711
0
    {
1712
0
      r->_mp_size = 0;
1713
0
      return;
1714
0
    }
1715
0
  B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1716
0
  Bi = 1.0 / B;
1717
0
  for (rn = 1; x >= B; rn++)
1718
0
    x *= Bi;
1719
1720
0
  rp = MPZ_REALLOC (r, rn);
1721
1722
0
  f = (mp_limb_t) x;
1723
0
  x -= f;
1724
0
  assert (x < 1.0);
1725
0
  i = rn-1;
1726
0
  rp[i] = f;
1727
0
  while (--i >= 0)
1728
0
    {
1729
0
      x = B * x;
1730
0
      f = (mp_limb_t) x;
1731
0
      x -= f;
1732
0
      assert (x < 1.0);
1733
0
      rp[i] = f;
1734
0
    }
1735
1736
0
  r->_mp_size = sign ? - rn : rn;
1737
0
}
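mpz_set_d first scales |x| down by B = 2^(limb bits) until it fits in one limb, then walks back down, peeling off the integer part as the next limb and keeping only the fraction. A sketch of the same scale-and-peel loop that just prints the 64-bit words of a double (assumes a finite x >= 1 and a 64-bit limb; not the mpz code path):

#include <stdint.h>
#include <stdio.h>

/* Split a finite double >= 1 into 64-bit words, most significant
   first, with the scale-by-B loop used in mpz_set_d (B = 2^64). */
int main (void)
{
  double B = 18446744073709551616.0;   /* 2^64 */
  double Bi = 1.0 / B;
  double x = 1.5e25;
  int rn, i;

  for (rn = 1; x >= B; rn++)           /* scale into [1, B) */
    x *= Bi;

  printf ("%d words:", rn);
  for (i = 0; i < rn; i++)
    {
      uint64_t f = (uint64_t) x;       /* integer part = next word */
      printf (" %llu", (unsigned long long) f);
      x = B * (x - f);                 /* keep only the fraction */
    }
  printf ("\n");
  return 0;
}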
1738
1739
void
1740
mpz_init_set_d (mpz_t r, double x)
1741
0
{
1742
0
  mpz_init (r);
1743
0
  mpz_set_d (r, x);
1744
0
}
1745
1746
double
1747
mpz_get_d (const mpz_t u)
1748
0
{
1749
0
  int m;
1750
0
  mp_limb_t l;
1751
0
  mp_size_t un;
1752
0
  double x;
1753
0
  double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1754
1755
0
  un = GMP_ABS (u->_mp_size);
1756
1757
0
  if (un == 0)
1758
0
    return 0.0;
1759
1760
0
  l = u->_mp_d[--un];
1761
0
  gmp_clz (m, l);
1762
0
  m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS;
1763
0
  if (m < 0)
1764
0
    l &= GMP_LIMB_MAX << -m;
1765
1766
0
  for (x = l; --un >= 0;)
1767
0
    {
1768
0
      x = B*x;
1769
0
      if (m > 0) {
1770
0
  l = u->_mp_d[un];
1771
0
  m -= GMP_LIMB_BITS;
1772
0
  if (m < 0)
1773
0
    l &= GMP_LIMB_MAX << -m;
1774
0
  x += l;
1775
0
      }
1776
0
    }
1777
1778
0
  if (u->_mp_size < 0)
1779
0
    x = -x;
1780
1781
0
  return x;
1782
0
}
1783
1784
int
1785
mpz_cmpabs_d (const mpz_t x, double d)
1786
0
{
1787
0
  mp_size_t xn;
1788
0
  double B, Bi;
1789
0
  mp_size_t i;
1790
1791
0
  xn = x->_mp_size;
1792
0
  d = GMP_ABS (d);
1793
1794
0
  if (xn != 0)
1795
0
    {
1796
0
      xn = GMP_ABS (xn);
1797
1798
0
      B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1799
0
      Bi = 1.0 / B;
1800
1801
      /* Scale d so it can be compared with the top limb. */
1802
0
      for (i = 1; i < xn; i++)
1803
0
  d *= Bi;
1804
1805
0
      if (d >= B)
1806
0
  return -1;
1807
1808
      /* Compare floor(d) to top limb, subtract and cancel when equal. */
1809
0
      for (i = xn; i-- > 0;)
1810
0
  {
1811
0
    mp_limb_t f, xl;
1812
1813
0
    f = (mp_limb_t) d;
1814
0
    xl = x->_mp_d[i];
1815
0
    if (xl > f)
1816
0
      return 1;
1817
0
    else if (xl < f)
1818
0
      return -1;
1819
0
    d = B * (d - f);
1820
0
  }
1821
0
    }
1822
0
  return - (d > 0.0);
1823
0
}
1824
1825
int
1826
mpz_cmp_d (const mpz_t x, double d)
1827
0
{
1828
0
  if (x->_mp_size < 0)
1829
0
    {
1830
0
      if (d >= 0.0)
1831
0
  return -1;
1832
0
      else
1833
0
  return -mpz_cmpabs_d (x, d);
1834
0
    }
1835
0
  else
1836
0
    {
1837
0
      if (d < 0.0)
1838
0
  return 1;
1839
0
      else
1840
0
  return mpz_cmpabs_d (x, d);
1841
0
    }
1842
0
}
1843
1844

1845
/* MPZ comparisons and the like. */
1846
int
1847
mpz_sgn (const mpz_t u)
1848
8.58k
{
1849
8.58k
  return GMP_CMP (u->_mp_size, 0);
1850
8.58k
}
1851
1852
int
1853
mpz_cmp_si (const mpz_t u, long v)
1854
0
{
1855
0
  mp_size_t usize = u->_mp_size;
1856
1857
0
  if (v >= 0)
1858
0
    return mpz_cmp_ui (u, v);
1859
0
  else if (usize >= 0)
1860
0
    return 1;
1861
0
  else
1862
0
    return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v));
1863
0
}
1864
1865
int
1866
mpz_cmp_ui (const mpz_t u, unsigned long v)
1867
0
{
1868
0
  mp_size_t usize = u->_mp_size;
1869
1870
0
  if (usize < 0)
1871
0
    return -1;
1872
0
  else
1873
0
    return mpz_cmpabs_ui (u, v);
1874
0
}
1875
1876
int
1877
mpz_cmp (const mpz_t a, const mpz_t b)
1878
4.36k
{
1879
4.36k
  mp_size_t asize = a->_mp_size;
1880
4.36k
  mp_size_t bsize = b->_mp_size;
1881
1882
4.36k
  if (asize != bsize)
1883
2.58k
    return (asize < bsize) ? -1 : 1;
1884
1.77k
  else if (asize >= 0)
1885
1.77k
    return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1886
0
  else
1887
0
    return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
1888
4.36k
}
1889
1890
int
1891
mpz_cmpabs_ui (const mpz_t u, unsigned long v)
1892
0
{
1893
0
  mp_size_t un = GMP_ABS (u->_mp_size);
1894
1895
0
  if (! mpn_absfits_ulong_p (u->_mp_d, un))
1896
0
    return 1;
1897
0
  else
1898
0
    {
1899
0
      unsigned long uu = mpz_get_ui (u);
1900
0
      return GMP_CMP(uu, v);
1901
0
    }
1902
0
}
1903
1904
int
1905
mpz_cmpabs (const mpz_t u, const mpz_t v)
1906
0
{
1907
0
  return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
1908
0
       v->_mp_d, GMP_ABS (v->_mp_size));
1909
0
}
1910
1911
void
1912
mpz_abs (mpz_t r, const mpz_t u)
1913
0
{
1914
0
  mpz_set (r, u);
1915
0
  r->_mp_size = GMP_ABS (r->_mp_size);
1916
0
}
1917
1918
void
1919
mpz_neg (mpz_t r, const mpz_t u)
1920
3.36k
{
1921
3.36k
  mpz_set (r, u);
1922
3.36k
  r->_mp_size = -r->_mp_size;
1923
3.36k
}
1924
1925
void
1926
mpz_swap (mpz_t u, mpz_t v)
1927
4.45k
{
1928
4.45k
  MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
1929
4.45k
  MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
1930
4.45k
  MP_PTR_SWAP (u->_mp_d, v->_mp_d);
1931
4.45k
}
1932
1933

1934
/* MPZ addition and subtraction */
1935
1936
1937
void
1938
mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1939
1.68k
{
1940
1.68k
  mpz_t bb;
1941
1.68k
  mpz_init_set_ui (bb, b);
1942
1.68k
  mpz_add (r, a, bb);
1943
1.68k
  mpz_clear (bb);
1944
1.68k
}
1945
1946
void
1947
mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1948
1.68k
{
1949
1.68k
  mpz_ui_sub (r, b, a);
1950
1.68k
  mpz_neg (r, r);
1951
1.68k
}
1952
1953
void
1954
mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
1955
1.68k
{
1956
1.68k
  mpz_neg (r, b);
1957
1.68k
  mpz_add_ui (r, r, a);
1958
1.68k
}
1959
1960
static mp_size_t
1961
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
1962
1.77k
{
1963
1.77k
  mp_size_t an = GMP_ABS (a->_mp_size);
1964
1.77k
  mp_size_t bn = GMP_ABS (b->_mp_size);
1965
1.77k
  mp_ptr rp;
1966
1.77k
  mp_limb_t cy;
1967
1968
1.77k
  if (an < bn)
1969
444
    {
1970
444
      MPZ_SRCPTR_SWAP (a, b);
1971
444
      MP_SIZE_T_SWAP (an, bn);
1972
444
    }
1973
1974
1.77k
  rp = MPZ_REALLOC (r, an + 1);
1975
1.77k
  cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
1976
1977
1.77k
  rp[an] = cy;
1978
1979
1.77k
  return an + cy;
1980
1.77k
}
1981
1982
static mp_size_t
1983
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
1984
3.27k
{
1985
3.27k
  mp_size_t an = GMP_ABS (a->_mp_size);
1986
3.27k
  mp_size_t bn = GMP_ABS (b->_mp_size);
1987
3.27k
  int cmp;
1988
3.27k
  mp_ptr rp;
1989
1990
3.27k
  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
1991
3.27k
  if (cmp > 0)
1992
1.78k
    {
1993
1.78k
      rp = MPZ_REALLOC (r, an);
1994
1.78k
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
1995
1.78k
      return mpn_normalized_size (rp, an);
1996
1.78k
    }
1997
1.48k
  else if (cmp < 0)
1998
1.48k
    {
1999
1.48k
      rp = MPZ_REALLOC (r, bn);
2000
1.48k
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
2001
1.48k
      return -mpn_normalized_size (rp, bn);
2002
1.48k
    }
2003
0
  else
2004
0
    return 0;
2005
3.27k
}
2006
2007
void
2008
mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
2009
3.36k
{
2010
3.36k
  mp_size_t rn;
2011
2012
3.36k
  if ( (a->_mp_size ^ b->_mp_size) >= 0)
2013
1.77k
    rn = mpz_abs_add (r, a, b);
2014
1.59k
  else
2015
1.59k
    rn = mpz_abs_sub (r, a, b);
2016
2017
3.36k
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
2018
3.36k
}
2019
2020
void
2021
mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
2022
1.68k
{
2023
1.68k
  mp_size_t rn;
2024
2025
1.68k
  if ( (a->_mp_size ^ b->_mp_size) >= 0)
2026
1.68k
    rn = mpz_abs_sub (r, a, b);
2027
0
  else
2028
0
    rn = mpz_abs_add (r, a, b);
2029
2030
1.68k
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
2031
1.68k
}
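Both mpz_add and mpz_sub above pick between mpz_abs_add and mpz_abs_sub with the test (a->_mp_size ^ b->_mp_size) >= 0: the XOR of two signed values is negative exactly when their sign bits differ, so this is a branch-free "signs agree" check (zero counting as nonnegative). A tiny standalone illustration of the predicate:

#include <stdio.h>

/* Sign-agreement test as used by mpz_add/mpz_sub: x ^ y is negative
   exactly when the sign bits of x and y differ. */
static int same_sign (long x, long y) { return (x ^ y) >= 0; }

int main (void)
{
  printf ("%d %d %d\n",
          same_sign (3, 5),      /* 1 */
          same_sign (-3, 5),     /* 0 */
          same_sign (-3, -5));   /* 1 */
  return 0;
}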
2032
2033

2034
/* MPZ multiplication */
2035
void
2036
mpz_mul_si (mpz_t r, const mpz_t u, long int v)
2037
0
{
2038
0
  if (v < 0)
2039
0
    {
2040
0
      mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
2041
0
      mpz_neg (r, r);
2042
0
    }
2043
0
  else
2044
0
    mpz_mul_ui (r, u, v);
2045
0
}
2046
2047
void
2048
mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2049
0
{
2050
0
  mpz_t vv;
2051
0
  mpz_init_set_ui (vv, v);
2052
0
  mpz_mul (r, u, vv);
2053
0
  mpz_clear (vv);
2054
0
  return;
2055
0
}
2056
2057
void
2058
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
2059
5.04k
{
2060
5.04k
  int sign;
2061
5.04k
  mp_size_t un, vn, rn;
2062
5.04k
  mpz_t t;
2063
5.04k
  mp_ptr tp;
2064
2065
5.04k
  un = u->_mp_size;
2066
5.04k
  vn = v->_mp_size;
2067
2068
5.04k
  if (un == 0 || vn == 0)
2069
586
    {
2070
586
      r->_mp_size = 0;
2071
586
      return;
2072
586
    }
2073
2074
4.45k
  sign = (un ^ vn) < 0;
2075
2076
4.45k
  un = GMP_ABS (un);
2077
4.45k
  vn = GMP_ABS (vn);
2078
2079
4.45k
  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);
2080
2081
4.45k
  tp = t->_mp_d;
2082
4.45k
  if (un >= vn)
2083
4.45k
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
2084
0
  else
2085
0
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);
2086
2087
4.45k
  rn = un + vn;
2088
4.45k
  rn -= tp[rn-1] == 0;
2089
2090
4.45k
  t->_mp_size = sign ? - rn : rn;
2091
4.45k
  mpz_swap (r, t);
2092
4.45k
  mpz_clear (t);
2093
4.45k
}
2094
2095
void
2096
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
2097
0
{
2098
0
  mp_size_t un, rn;
2099
0
  mp_size_t limbs;
2100
0
  unsigned shift;
2101
0
  mp_ptr rp;
2102
2103
0
  un = GMP_ABS (u->_mp_size);
2104
0
  if (un == 0)
2105
0
    {
2106
0
      r->_mp_size = 0;
2107
0
      return;
2108
0
    }
2109
2110
0
  limbs = bits / GMP_LIMB_BITS;
2111
0
  shift = bits % GMP_LIMB_BITS;
2112
2113
0
  rn = un + limbs + (shift > 0);
2114
0
  rp = MPZ_REALLOC (r, rn);
2115
0
  if (shift > 0)
2116
0
    {
2117
0
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
2118
0
      rp[rn-1] = cy;
2119
0
      rn -= (cy == 0);
2120
0
    }
2121
0
  else
2122
0
    mpn_copyd (rp + limbs, u->_mp_d, un);
2123
2124
0
  mpn_zero (rp, limbs);
2125
2126
0
  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
2127
0
}
2128
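mpz_mul_2exp is the multiply-by-2^bits special case: whole limbs are handled by offsetting the destination, and any remaining bit count goes through mpn_lshift. A small sketch (illustration only, not part of mini-gmp.c) checking it against a plain multiplication:

#include <assert.h>
#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t u, a, b, p;
  mpz_init_set_ui (u, 12345);
  mpz_init (a);
  mpz_init (b);
  mpz_init (p);

  mpz_mul_2exp (a, u, 100);        /* a = u * 2^100 via limb/bit shifting */

  mpz_ui_pow_ui (p, 2, 100);       /* p = 2^100 */
  mpz_mul (b, u, p);               /* b = u * 2^100 via full multiplication */

  assert (mpz_cmp (a, b) == 0);    /* both paths must agree */
  printf ("12345 * 2^100 has %zu decimal digits\n", mpz_sizeinbase (a, 10));

  mpz_clear (u); mpz_clear (a); mpz_clear (b); mpz_clear (p);
  return 0;
}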
2129
void
2130
mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2131
0
{
2132
0
  mpz_t t;
2133
0
  mpz_init_set_ui (t, v);
2134
0
  mpz_mul (t, u, t);
2135
0
  mpz_add (r, r, t);
2136
0
  mpz_clear (t);
2137
0
}
2138
2139
void
2140
mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2141
0
{
2142
0
  mpz_t t;
2143
0
  mpz_init_set_ui (t, v);
2144
0
  mpz_mul (t, u, t);
2145
0
  mpz_sub (r, r, t);
2146
0
  mpz_clear (t);
2147
0
}
2148
2149
void
2150
mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
2151
0
{
2152
0
  mpz_t t;
2153
0
  mpz_init (t);
2154
0
  mpz_mul (t, u, v);
2155
0
  mpz_add (r, r, t);
2156
0
  mpz_clear (t);
2157
0
}
2158
2159
void
2160
mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
2161
0
{
2162
0
  mpz_t t;
2163
0
  mpz_init (t);
2164
0
  mpz_mul (t, u, v);
2165
0
  mpz_sub (r, r, t);
2166
0
  mpz_clear (t);
2167
0
}
2168
2169

2170
/* MPZ division */
2171
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2172
2173
/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
2174
static int
2175
mpz_div_qr (mpz_t q, mpz_t r,
2176
      const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
2177
1.68k
{
2178
1.68k
  mp_size_t ns, ds, nn, dn, qs;
2179
1.68k
  ns = n->_mp_size;
2180
1.68k
  ds = d->_mp_size;
2181
2182
1.68k
  if (ds == 0)
2183
0
    gmp_die("mpz_div_qr: Divide by zero.");
2184
2185
1.68k
  if (ns == 0)
2186
0
    {
2187
0
      if (q)
2188
0
  q->_mp_size = 0;
2189
0
      if (r)
2190
0
  r->_mp_size = 0;
2191
0
      return 0;
2192
0
    }
2193
2194
1.68k
  nn = GMP_ABS (ns);
2195
1.68k
  dn = GMP_ABS (ds);
2196
2197
1.68k
  qs = ds ^ ns;
2198
2199
1.68k
  if (nn < dn)
2200
0
    {
2201
0
      if (mode == GMP_DIV_CEIL && qs >= 0)
2202
0
  {
2203
    /* q = 1, r = n - d */
2204
0
    if (r)
2205
0
      mpz_sub (r, n, d);
2206
0
    if (q)
2207
0
      mpz_set_ui (q, 1);
2208
0
  }
2209
0
      else if (mode == GMP_DIV_FLOOR && qs < 0)
2210
0
  {
2211
    /* q = -1, r = n + d */
2212
0
    if (r)
2213
0
      mpz_add (r, n, d);
2214
0
    if (q)
2215
0
      mpz_set_si (q, -1);
2216
0
  }
2217
0
      else
2218
0
  {
2219
    /* q = 0, r = n */
2220
0
    if (r)
2221
0
      mpz_set (r, n);
2222
0
    if (q)
2223
0
      q->_mp_size = 0;
2224
0
  }
2225
0
      return 1;
2226
0
    }
2227
1.68k
  else
2228
1.68k
    {
2229
1.68k
      mp_ptr np, qp;
2230
1.68k
      mp_size_t qn, rn;
2231
1.68k
      mpz_t tq, tr;
2232
2233
1.68k
      mpz_init_set (tr, n);
2234
1.68k
      np = tr->_mp_d;
2235
2236
1.68k
      qn = nn - dn + 1;
2237
2238
1.68k
      if (q)
2239
0
  {
2240
0
    mpz_init2 (tq, qn * GMP_LIMB_BITS);
2241
0
    qp = tq->_mp_d;
2242
0
  }
2243
1.68k
      else
2244
1.68k
  qp = NULL;
2245
2246
1.68k
      mpn_div_qr (qp, np, nn, d->_mp_d, dn);
2247
2248
1.68k
      if (qp)
2249
0
  {
2250
0
    qn -= (qp[qn-1] == 0);
2251
2252
0
    tq->_mp_size = qs < 0 ? -qn : qn;
2253
0
  }
2254
1.68k
      rn = mpn_normalized_size (np, dn);
2255
1.68k
      tr->_mp_size = ns < 0 ? - rn : rn;
2256
2257
1.68k
      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
2258
0
  {
2259
0
    if (q)
2260
0
      mpz_sub_ui (tq, tq, 1);
2261
0
    if (r)
2262
0
      mpz_add (tr, tr, d);
2263
0
  }
2264
1.68k
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
2265
0
  {
2266
0
    if (q)
2267
0
      mpz_add_ui (tq, tq, 1);
2268
0
    if (r)
2269
0
      mpz_sub (tr, tr, d);
2270
0
  }
2271
2272
1.68k
      if (q)
2273
0
  {
2274
0
    mpz_swap (tq, q);
2275
0
    mpz_clear (tq);
2276
0
  }
2277
1.68k
      if (r)
2278
0
  mpz_swap (tr, r);
2279
2280
1.68k
      mpz_clear (tr);
2281
2282
1.68k
      return rn != 0;
2283
1.68k
    }
2284
1.68k
}
2285
2286
void
2287
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2288
0
{
2289
0
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
2290
0
}
2291
2292
void
2293
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2294
0
{
2295
0
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
2296
0
}
2297
2298
void
2299
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2300
0
{
2301
0
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
2302
0
}
2303
2304
void
2305
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2306
0
{
2307
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
2308
0
}
2309
2310
void
2311
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2312
0
{
2313
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
2314
0
}
2315
2316
void
2317
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2318
0
{
2319
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
2320
0
}
2321
2322
void
2323
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2324
0
{
2325
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
2326
0
}
2327
2328
void
2329
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2330
0
{
2331
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
2332
0
}
2333
2334
void
2335
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2336
0
{
2337
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
2338
0
}
2339
2340
void
2341
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
2342
0
{
2343
0
  mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
2344
0
}
2345
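All of the wrappers above funnel into mpz_div_qr with one of the three rounding modes; the only difference is how a non-zero remainder adjusts the quotient. A hedged sketch (hypothetical driver, not part of the library) showing the three conventions on -7 / 2:

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t n, d, q, r;
  mpz_init_set_si (n, -7);
  mpz_init_set_ui (d, 2);
  mpz_init (q);
  mpz_init (r);

  mpz_tdiv_qr (q, r, n, d);   /* truncate toward zero:  q = -3, r = -1 */
  printf ("tdiv: q=%ld r=%ld\n", mpz_get_si (q), mpz_get_si (r));

  mpz_fdiv_qr (q, r, n, d);   /* floor (toward -inf):   q = -4, r =  1 */
  printf ("fdiv: q=%ld r=%ld\n", mpz_get_si (q), mpz_get_si (r));

  mpz_cdiv_qr (q, r, n, d);   /* ceiling (toward +inf): q = -3, r = -1 */
  printf ("cdiv: q=%ld r=%ld\n", mpz_get_si (q), mpz_get_si (r));

  mpz_clear (n); mpz_clear (d); mpz_clear (q); mpz_clear (r);
  return 0;
}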
2346
static void
2347
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
2348
    enum mpz_div_round_mode mode)
2349
0
{
2350
0
  mp_size_t un, qn;
2351
0
  mp_size_t limb_cnt;
2352
0
  mp_ptr qp;
2353
0
  int adjust;
2354
2355
0
  un = u->_mp_size;
2356
0
  if (un == 0)
2357
0
    {
2358
0
      q->_mp_size = 0;
2359
0
      return;
2360
0
    }
2361
0
  limb_cnt = bit_index / GMP_LIMB_BITS;
2362
0
  qn = GMP_ABS (un) - limb_cnt;
2363
0
  bit_index %= GMP_LIMB_BITS;
2364
2365
0
  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
2366
    /* Note: Below, the final indexing at limb_cnt is valid because at
2367
       that point we have qn > 0. */
2368
0
    adjust = (qn <= 0
2369
0
        || !mpn_zero_p (u->_mp_d, limb_cnt)
2370
0
        || (u->_mp_d[limb_cnt]
2371
0
      & (((mp_limb_t) 1 << bit_index) - 1)));
2372
0
  else
2373
0
    adjust = 0;
2374
2375
0
  if (qn <= 0)
2376
0
    qn = 0;
2377
0
  else
2378
0
    {
2379
0
      qp = MPZ_REALLOC (q, qn);
2380
2381
0
      if (bit_index != 0)
2382
0
  {
2383
0
    mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
2384
0
    qn -= qp[qn - 1] == 0;
2385
0
  }
2386
0
      else
2387
0
  {
2388
0
    mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
2389
0
  }
2390
0
    }
2391
2392
0
  q->_mp_size = qn;
2393
2394
0
  if (adjust)
2395
0
    mpz_add_ui (q, q, 1);
2396
0
  if (un < 0)
2397
0
    mpz_neg (q, q);
2398
0
}
2399
2400
static void
2401
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
2402
    enum mpz_div_round_mode mode)
2403
0
{
2404
0
  mp_size_t us, un, rn;
2405
0
  mp_ptr rp;
2406
0
  mp_limb_t mask;
2407
2408
0
  us = u->_mp_size;
2409
0
  if (us == 0 || bit_index == 0)
2410
0
    {
2411
0
      r->_mp_size = 0;
2412
0
      return;
2413
0
    }
2414
0
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
2415
0
  assert (rn > 0);
2416
2417
0
  rp = MPZ_REALLOC (r, rn);
2418
0
  un = GMP_ABS (us);
2419
2420
0
  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);
2421
2422
0
  if (rn > un)
2423
0
    {
2424
      /* Quotient (with truncation) is zero, and remainder is
2425
   non-zero */
2426
0
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2427
0
  {
2428
    /* Have to negate and sign extend. */
2429
0
    mp_size_t i;
2430
2431
0
    gmp_assert_nocarry (! mpn_neg (rp, u->_mp_d, un));
2432
0
    for (i = un; i < rn - 1; i++)
2433
0
      rp[i] = GMP_LIMB_MAX;
2434
2435
0
    rp[rn-1] = mask;
2436
0
    us = -us;
2437
0
  }
2438
0
      else
2439
0
  {
2440
    /* Just copy */
2441
0
    if (r != u)
2442
0
      mpn_copyi (rp, u->_mp_d, un);
2443
2444
0
    rn = un;
2445
0
  }
2446
0
    }
2447
0
  else
2448
0
    {
2449
0
      if (r != u)
2450
0
  mpn_copyi (rp, u->_mp_d, rn - 1);
2451
2452
0
      rp[rn-1] = u->_mp_d[rn-1] & mask;
2453
2454
0
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2455
0
  {
2456
    /* If r != 0, compute 2^{bit_count} - r. */
2457
0
    mpn_neg (rp, rp, rn);
2458
2459
0
    rp[rn-1] &= mask;
2460
2461
    /* us is not used for anything else, so we can modify it
2462
       here to indicate flipped sign. */
2463
0
    us = -us;
2464
0
  }
2465
0
    }
2466
0
  rn = mpn_normalized_size (rp, rn);
2467
0
  r->_mp_size = us < 0 ? -rn : rn;
2468
0
}
2469
2470
void
2471
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2472
0
{
2473
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
2474
0
}
2475
2476
void
2477
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2478
0
{
2479
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
2480
0
}
2481
2482
void
2483
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2484
0
{
2485
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
2486
0
}
2487
2488
void
2489
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2490
0
{
2491
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
2492
0
}
2493
2494
void
2495
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2496
0
{
2497
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
2498
0
}
2499
2500
void
2501
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2502
0
{
2503
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
2504
0
}
2505
2506
void
2507
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
2508
0
{
2509
0
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
2510
0
}
2511
2512
int
2513
mpz_divisible_p (const mpz_t n, const mpz_t d)
2514
1.68k
{
2515
1.68k
  return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2516
1.68k
}
2517
2518
int
2519
mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
2520
1.68k
{
2521
1.68k
  mpz_t t;
2522
1.68k
  int res;
2523
2524
  /* a == b (mod 0) iff a == b */
2525
1.68k
  if (mpz_sgn (m) == 0)
2526
0
    return (mpz_cmp (a, b) == 0);
2527
2528
1.68k
  mpz_init (t);
2529
1.68k
  mpz_sub (t, a, b);
2530
1.68k
  res = mpz_divisible_p (t, m);
2531
1.68k
  mpz_clear (t);
2532
2533
1.68k
  return res;
2534
1.68k
}
2535
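mpz_congruent_p is just a subtraction followed by mpz_divisible_p, which in turn discards quotient and remainder and only reports whether the remainder was zero. An illustrative sketch (assumed test driver, not part of mini-gmp.c):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t a, b, m;
  mpz_init_set_ui (a, 17);
  mpz_init_set_ui (b, 5);
  mpz_init_set_ui (m, 12);

  /* 17 - 5 = 12 is divisible by 12, so 17 == 5 (mod 12). */
  printf ("divisible: %d\n", mpz_divisible_p (a, m));     /* 17 %% 12 != 0 -> 0 */
  printf ("congruent: %d\n", mpz_congruent_p (a, b, m));  /* (17-5) %% 12 == 0 -> nonzero */

  mpz_clear (a); mpz_clear (b); mpz_clear (m);
  return 0;
}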
2536
static unsigned long
2537
mpz_div_qr_ui (mpz_t q, mpz_t r,
2538
         const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
2539
0
{
2540
0
  unsigned long ret;
2541
0
  mpz_t rr, dd;
2542
2543
0
  mpz_init (rr);
2544
0
  mpz_init_set_ui (dd, d);
2545
0
  mpz_div_qr (q, rr, n, dd, mode);
2546
0
  mpz_clear (dd);
2547
0
  ret = mpz_get_ui (rr);
2548
2549
0
  if (r)
2550
0
    mpz_swap (r, rr);
2551
0
  mpz_clear (rr);
2552
2553
0
  return ret;
2554
0
}
2555
2556
unsigned long
2557
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2558
0
{
2559
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
2560
0
}
2561
2562
unsigned long
2563
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2564
0
{
2565
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
2566
0
}
2567
2568
unsigned long
2569
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2570
0
{
2571
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
2572
0
}
2573
2574
unsigned long
2575
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2576
0
{
2577
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
2578
0
}
2579
2580
unsigned long
2581
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2582
0
{
2583
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
2584
0
}
2585
2586
unsigned long
2587
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2588
0
{
2589
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
2590
0
}
2591
2592
unsigned long
2593
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2594
0
{
2595
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
2596
0
}
2597
unsigned long
2598
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2599
0
{
2600
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2601
0
}
2602
unsigned long
2603
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2604
0
{
2605
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
2606
0
}
2607
2608
unsigned long
2609
mpz_cdiv_ui (const mpz_t n, unsigned long d)
2610
0
{
2611
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
2612
0
}
2613
2614
unsigned long
2615
mpz_fdiv_ui (const mpz_t n, unsigned long d)
2616
0
{
2617
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
2618
0
}
2619
2620
unsigned long
2621
mpz_tdiv_ui (const mpz_t n, unsigned long d)
2622
0
{
2623
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
2624
0
}
2625
2626
unsigned long
2627
mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
2628
0
{
2629
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2630
0
}
2631
2632
void
2633
mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
2634
0
{
2635
0
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
2636
0
}
2637
2638
int
2639
mpz_divisible_ui_p (const mpz_t n, unsigned long d)
2640
0
{
2641
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2642
0
}
2643
2644

2645
/* GCD */
2646
static mp_limb_t
2647
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
2648
0
{
2649
0
  unsigned shift;
2650
2651
0
  assert ( (u | v) > 0);
2652
2653
0
  if (u == 0)
2654
0
    return v;
2655
0
  else if (v == 0)
2656
0
    return u;
2657
2658
0
  gmp_ctz (shift, u | v);
2659
2660
0
  u >>= shift;
2661
0
  v >>= shift;
2662
2663
0
  if ( (u & 1) == 0)
2664
0
    MP_LIMB_T_SWAP (u, v);
2665
2666
0
  while ( (v & 1) == 0)
2667
0
    v >>= 1;
2668
2669
0
  while (u != v)
2670
0
    {
2671
0
      if (u > v)
2672
0
  {
2673
0
    u -= v;
2674
0
    do
2675
0
      u >>= 1;
2676
0
    while ( (u & 1) == 0);
2677
0
  }
2678
0
      else
2679
0
  {
2680
0
    v -= u;
2681
0
    do
2682
0
      v >>= 1;
2683
0
    while ( (v & 1) == 0);
2684
0
  }
2685
0
    }
2686
0
  return u << shift;
2687
0
}
2688
2689
unsigned long
2690
mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
2691
0
{
2692
0
  mpz_t t;
2693
0
  mpz_init_set_ui(t, v);
2694
0
  mpz_gcd (t, u, t);
2695
0
  if (v > 0)
2696
0
    v = mpz_get_ui (t);
2697
2698
0
  if (g)
2699
0
    mpz_swap (t, g);
2700
2701
0
  mpz_clear (t);
2702
2703
0
  return v;
2704
0
}
2705
2706
static mp_bitcnt_t
2707
mpz_make_odd (mpz_t r)
2708
0
{
2709
0
  mp_bitcnt_t shift;
2710
2711
0
  assert (r->_mp_size > 0);
2712
  /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2713
0
  shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
2714
0
  mpz_tdiv_q_2exp (r, r, shift);
2715
2716
0
  return shift;
2717
0
}
2718
2719
void
2720
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
2721
0
{
2722
0
  mpz_t tu, tv;
2723
0
  mp_bitcnt_t uz, vz, gz;
2724
2725
0
  if (u->_mp_size == 0)
2726
0
    {
2727
0
      mpz_abs (g, v);
2728
0
      return;
2729
0
    }
2730
0
  if (v->_mp_size == 0)
2731
0
    {
2732
0
      mpz_abs (g, u);
2733
0
      return;
2734
0
    }
2735
2736
0
  mpz_init (tu);
2737
0
  mpz_init (tv);
2738
2739
0
  mpz_abs (tu, u);
2740
0
  uz = mpz_make_odd (tu);
2741
0
  mpz_abs (tv, v);
2742
0
  vz = mpz_make_odd (tv);
2743
0
  gz = GMP_MIN (uz, vz);
2744
2745
0
  if (tu->_mp_size < tv->_mp_size)
2746
0
    mpz_swap (tu, tv);
2747
2748
0
  mpz_tdiv_r (tu, tu, tv);
2749
0
  if (tu->_mp_size == 0)
2750
0
    {
2751
0
      mpz_swap (g, tv);
2752
0
    }
2753
0
  else
2754
0
    for (;;)
2755
0
      {
2756
0
  int c;
2757
2758
0
  mpz_make_odd (tu);
2759
0
  c = mpz_cmp (tu, tv);
2760
0
  if (c == 0)
2761
0
    {
2762
0
      mpz_swap (g, tu);
2763
0
      break;
2764
0
    }
2765
0
  if (c < 0)
2766
0
    mpz_swap (tu, tv);
2767
2768
0
  if (tv->_mp_size == 1)
2769
0
    {
2770
0
      mp_limb_t vl = tv->_mp_d[0];
2771
0
      mp_limb_t ul = mpz_tdiv_ui (tu, vl);
2772
0
      mpz_set_ui (g, mpn_gcd_11 (ul, vl));
2773
0
      break;
2774
0
    }
2775
0
  mpz_sub (tu, tu, tv);
2776
0
      }
2777
0
  mpz_clear (tu);
2778
0
  mpz_clear (tv);
2779
0
  mpz_mul_2exp (g, g, gz);
2780
0
}
2781
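mpz_gcd strips the common power of two from both operands (mpz_make_odd), runs a binary/subtractive reduction, finishes on a single limb with mpn_gcd_11, and finally shifts the common factor of two back in. A usage sketch (hypothetical, not from the library's test suite):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t u, v, g;
  mpz_init_set_ui (u, 240);
  mpz_init_set_ui (v, 46);
  mpz_init (g);

  mpz_gcd (g, u, v);                        /* gcd(240, 46) = 2 */
  printf ("gcd(240, 46) = %lu\n", mpz_get_ui (g));

  mpz_neg (u, u);
  mpz_gcd (g, u, v);                        /* signs are ignored: still 2 */
  printf ("gcd(-240, 46) = %lu\n", mpz_get_ui (g));

  mpz_clear (u); mpz_clear (v); mpz_clear (g);
  return 0;
}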
2782
void
2783
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
2784
0
{
2785
0
  mpz_t tu, tv, s0, s1, t0, t1;
2786
0
  mp_bitcnt_t uz, vz, gz;
2787
0
  mp_bitcnt_t power;
2788
2789
0
  if (u->_mp_size == 0)
2790
0
    {
2791
      /* g = 0 u + sgn(v) v */
2792
0
      signed long sign = mpz_sgn (v);
2793
0
      mpz_abs (g, v);
2794
0
      if (s)
2795
0
  s->_mp_size = 0;
2796
0
      if (t)
2797
0
  mpz_set_si (t, sign);
2798
0
      return;
2799
0
    }
2800
2801
0
  if (v->_mp_size == 0)
2802
0
    {
2803
      /* g = sgn(u) u + 0 v */
2804
0
      signed long sign = mpz_sgn (u);
2805
0
      mpz_abs (g, u);
2806
0
      if (s)
2807
0
  mpz_set_si (s, sign);
2808
0
      if (t)
2809
0
  t->_mp_size = 0;
2810
0
      return;
2811
0
    }
2812
2813
0
  mpz_init (tu);
2814
0
  mpz_init (tv);
2815
0
  mpz_init (s0);
2816
0
  mpz_init (s1);
2817
0
  mpz_init (t0);
2818
0
  mpz_init (t1);
2819
2820
0
  mpz_abs (tu, u);
2821
0
  uz = mpz_make_odd (tu);
2822
0
  mpz_abs (tv, v);
2823
0
  vz = mpz_make_odd (tv);
2824
0
  gz = GMP_MIN (uz, vz);
2825
2826
0
  uz -= gz;
2827
0
  vz -= gz;
2828
2829
  /* Cofactors corresponding to odd gcd. gz handled later. */
2830
0
  if (tu->_mp_size < tv->_mp_size)
2831
0
    {
2832
0
      mpz_swap (tu, tv);
2833
0
      MPZ_SRCPTR_SWAP (u, v);
2834
0
      MPZ_PTR_SWAP (s, t);
2835
0
      MP_BITCNT_T_SWAP (uz, vz);
2836
0
    }
2837
2838
  /* Maintain
2839
   *
2840
   * u = t0 tu + t1 tv
2841
   * v = s0 tu + s1 tv
2842
   *
2843
   * where u and v denote the inputs with common factors of two
2844
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2845
   *
2846
   * 2^p tu =  s1 u - t1 v
2847
   * 2^p tv = -s0 u + t0 v
2848
   */
2849
2850
  /* After initial division, tu = q tv + tu', we have
2851
   *
2852
   * u = 2^uz (tu' + q tv)
2853
   * v = 2^vz tv
2854
   *
2855
   * or
2856
   *
2857
   * t0 = 2^uz, t1 = 2^uz q
2858
   * s0 = 0,    s1 = 2^vz
2859
   */
2860
2861
0
  mpz_setbit (t0, uz);
2862
0
  mpz_tdiv_qr (t1, tu, tu, tv);
2863
0
  mpz_mul_2exp (t1, t1, uz);
2864
2865
0
  mpz_setbit (s1, vz);
2866
0
  power = uz + vz;
2867
2868
0
  if (tu->_mp_size > 0)
2869
0
    {
2870
0
      mp_bitcnt_t shift;
2871
0
      shift = mpz_make_odd (tu);
2872
0
      mpz_mul_2exp (t0, t0, shift);
2873
0
      mpz_mul_2exp (s0, s0, shift);
2874
0
      power += shift;
2875
2876
0
      for (;;)
2877
0
  {
2878
0
    int c;
2879
0
    c = mpz_cmp (tu, tv);
2880
0
    if (c == 0)
2881
0
      break;
2882
2883
0
    if (c < 0)
2884
0
      {
2885
        /* tv = tv' + tu
2886
         *
2887
         * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2888
         * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2889
2890
0
        mpz_sub (tv, tv, tu);
2891
0
        mpz_add (t0, t0, t1);
2892
0
        mpz_add (s0, s0, s1);
2893
2894
0
        shift = mpz_make_odd (tv);
2895
0
        mpz_mul_2exp (t1, t1, shift);
2896
0
        mpz_mul_2exp (s1, s1, shift);
2897
0
      }
2898
0
    else
2899
0
      {
2900
0
        mpz_sub (tu, tu, tv);
2901
0
        mpz_add (t1, t0, t1);
2902
0
        mpz_add (s1, s0, s1);
2903
2904
0
        shift = mpz_make_odd (tu);
2905
0
        mpz_mul_2exp (t0, t0, shift);
2906
0
        mpz_mul_2exp (s0, s0, shift);
2907
0
      }
2908
0
    power += shift;
2909
0
  }
2910
0
    }
2911
2912
  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2913
     cofactors. */
2914
2915
0
  mpz_mul_2exp (tv, tv, gz);
2916
0
  mpz_neg (s0, s0);
2917
2918
  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2919
     adjust cofactors, we need u / g and v / g */
2920
2921
0
  mpz_divexact (s1, v, tv);
2922
0
  mpz_abs (s1, s1);
2923
0
  mpz_divexact (t1, u, tv);
2924
0
  mpz_abs (t1, t1);
2925
2926
0
  while (power-- > 0)
2927
0
    {
2928
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2929
0
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
2930
0
  {
2931
0
    mpz_sub (s0, s0, s1);
2932
0
    mpz_add (t0, t0, t1);
2933
0
  }
2934
0
      assert (mpz_even_p (t0) && mpz_even_p (s0));
2935
0
      mpz_tdiv_q_2exp (s0, s0, 1);
2936
0
      mpz_tdiv_q_2exp (t0, t0, 1);
2937
0
    }
2938
2939
  /* Arrange so that |s| < |u| / 2g */
2940
0
  mpz_add (s1, s0, s1);
2941
0
  if (mpz_cmpabs (s0, s1) > 0)
2942
0
    {
2943
0
      mpz_swap (s0, s1);
2944
0
      mpz_sub (t0, t0, t1);
2945
0
    }
2946
0
  if (u->_mp_size < 0)
2947
0
    mpz_neg (s0, s0);
2948
0
  if (v->_mp_size < 0)
2949
0
    mpz_neg (t0, t0);
2950
2951
0
  mpz_swap (g, tv);
2952
0
  if (s)
2953
0
    mpz_swap (s, s0);
2954
0
  if (t)
2955
0
    mpz_swap (t, t0);
2956
2957
0
  mpz_clear (tu);
2958
0
  mpz_clear (tv);
2959
0
  mpz_clear (s0);
2960
0
  mpz_clear (s1);
2961
0
  mpz_clear (t0);
2962
0
  mpz_clear (t1);
2963
0
}
2964
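Whatever path the reduction above takes, the function exits with the Bézout identity g = s*u + t*v. A sketch (hypothetical driver, not part of mini-gmp.c) verifying that identity:

#include <assert.h>
#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t u, v, g, s, t, chk, tmp;
  mpz_init_set_ui (u, 6);
  mpz_init_set_ui (v, 15);
  mpz_init (g); mpz_init (s); mpz_init (t);
  mpz_init (chk); mpz_init (tmp);

  mpz_gcdext (g, s, t, u, v);     /* g = gcd(6, 15) = 3 = s*6 + t*15 */

  mpz_mul (chk, s, u);            /* chk = s*u + t*v */
  mpz_mul (tmp, t, v);
  mpz_add (chk, chk, tmp);
  assert (mpz_cmp (chk, g) == 0);

  printf ("gcd = %lu, Bezout identity holds\n", mpz_get_ui (g));

  mpz_clear (u); mpz_clear (v); mpz_clear (g);
  mpz_clear (s); mpz_clear (t); mpz_clear (chk); mpz_clear (tmp);
  return 0;
}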
2965
void
2966
mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
2967
0
{
2968
0
  mpz_t g;
2969
2970
0
  if (u->_mp_size == 0 || v->_mp_size == 0)
2971
0
    {
2972
0
      r->_mp_size = 0;
2973
0
      return;
2974
0
    }
2975
2976
0
  mpz_init (g);
2977
2978
0
  mpz_gcd (g, u, v);
2979
0
  mpz_divexact (g, u, g);
2980
0
  mpz_mul (r, g, v);
2981
2982
0
  mpz_clear (g);
2983
0
  mpz_abs (r, r);
2984
0
}
2985
2986
void
2987
mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
2988
0
{
2989
0
  if (v == 0 || u->_mp_size == 0)
2990
0
    {
2991
0
      r->_mp_size = 0;
2992
0
      return;
2993
0
    }
2994
2995
0
  v /= mpz_gcd_ui (NULL, u, v);
2996
0
  mpz_mul_ui (r, u, v);
2997
2998
0
  mpz_abs (r, r);
2999
0
}
3000
3001
int
3002
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
3003
0
{
3004
0
  mpz_t g, tr;
3005
0
  int invertible;
3006
3007
0
  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
3008
0
    return 0;
3009
3010
0
  mpz_init (g);
3011
0
  mpz_init (tr);
3012
3013
0
  mpz_gcdext (g, tr, NULL, u, m);
3014
0
  invertible = (mpz_cmp_ui (g, 1) == 0);
3015
3016
0
  if (invertible)
3017
0
    {
3018
0
      if (tr->_mp_size < 0)
3019
0
  {
3020
0
    if (m->_mp_size >= 0)
3021
0
      mpz_add (tr, tr, m);
3022
0
    else
3023
0
      mpz_sub (tr, tr, m);
3024
0
  }
3025
0
      mpz_swap (r, tr);
3026
0
    }
3027
3028
0
  mpz_clear (g);
3029
0
  mpz_clear (tr);
3030
0
  return invertible;
3031
0
}
3032
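mpz_invert succeeds exactly when gcd(u, m) = 1, and shifts the cofactor returned by mpz_gcdext into the range [0, |m|) before handing it back. Sketch (assumed driver code):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t u, m, inv, chk;
  mpz_init_set_ui (u, 3);
  mpz_init_set_ui (m, 10);
  mpz_init (inv);
  mpz_init (chk);

  if (mpz_invert (inv, u, m))              /* 3 * 7 = 21 == 1 (mod 10) */
    {
      mpz_mul (chk, u, inv);
      mpz_mod (chk, chk, m);
      printf ("inverse = %lu, check = %lu\n", mpz_get_ui (inv), mpz_get_ui (chk));
    }

  mpz_set_ui (u, 4);
  printf ("4 invertible mod 10? %d\n", mpz_invert (inv, u, m));  /* gcd = 2 -> 0 */

  mpz_clear (u); mpz_clear (m); mpz_clear (inv); mpz_clear (chk);
  return 0;
}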
3033

3034
/* Higher level operations (sqrt, pow and root) */
3035
3036
void
3037
mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
3038
0
{
3039
0
  unsigned long bit;
3040
0
  mpz_t tr;
3041
0
  mpz_init_set_ui (tr, 1);
3042
3043
0
  bit = GMP_ULONG_HIGHBIT;
3044
0
  do
3045
0
    {
3046
0
      mpz_mul (tr, tr, tr);
3047
0
      if (e & bit)
3048
0
  mpz_mul (tr, tr, b);
3049
0
      bit >>= 1;
3050
0
    }
3051
0
  while (bit > 0);
3052
3053
0
  mpz_swap (r, tr);
3054
0
  mpz_clear (tr);
3055
0
}
3056
3057
void
3058
mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
3059
0
{
3060
0
  mpz_t b;
3061
3062
0
  mpz_init_set_ui (b, blimb);
3063
0
  mpz_pow_ui (r, b, e);
3064
0
  mpz_clear (b);
3065
0
}
3066
3067
void
3068
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
3069
0
{
3070
0
  mpz_t tr;
3071
0
  mpz_t base;
3072
0
  mp_size_t en, mn;
3073
0
  mp_srcptr mp;
3074
0
  struct gmp_div_inverse minv;
3075
0
  unsigned shift;
3076
0
  mp_ptr tp = NULL;
3077
3078
0
  en = GMP_ABS (e->_mp_size);
3079
0
  mn = GMP_ABS (m->_mp_size);
3080
0
  if (mn == 0)
3081
0
    gmp_die ("mpz_powm: Zero modulo.");
3082
3083
0
  if (en == 0)
3084
0
    {
3085
0
      mpz_set_ui (r, 1);
3086
0
      return;
3087
0
    }
3088
3089
0
  mp = m->_mp_d;
3090
0
  mpn_div_qr_invert (&minv, mp, mn);
3091
0
  shift = minv.shift;
3092
3093
0
  if (shift > 0)
3094
0
    {
3095
      /* To avoid shifts, we do all our reductions, except the final
3096
   one, using a *normalized* m. */
3097
0
      minv.shift = 0;
3098
3099
0
      tp = gmp_xalloc_limbs (mn);
3100
0
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
3101
0
      mp = tp;
3102
0
    }
3103
3104
0
  mpz_init (base);
3105
3106
0
  if (e->_mp_size < 0)
3107
0
    {
3108
0
      if (!mpz_invert (base, b, m))
3109
0
  gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3110
0
    }
3111
0
  else
3112
0
    {
3113
0
      mp_size_t bn;
3114
0
      mpz_abs (base, b);
3115
3116
0
      bn = base->_mp_size;
3117
0
      if (bn >= mn)
3118
0
  {
3119
0
    mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
3120
0
    bn = mn;
3121
0
  }
3122
3123
      /* We have reduced the absolute value. Now take care of the
3124
   sign. Note that we get zero represented non-canonically as
3125
   m. */
3126
0
      if (b->_mp_size < 0)
3127
0
  {
3128
0
    mp_ptr bp = MPZ_REALLOC (base, mn);
3129
0
    gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
3130
0
    bn = mn;
3131
0
  }
3132
0
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
3133
0
    }
3134
0
  mpz_init_set_ui (tr, 1);
3135
3136
0
  while (--en >= 0)
3137
0
    {
3138
0
      mp_limb_t w = e->_mp_d[en];
3139
0
      mp_limb_t bit;
3140
3141
0
      bit = GMP_LIMB_HIGHBIT;
3142
0
      do
3143
0
  {
3144
0
    mpz_mul (tr, tr, tr);
3145
0
    if (w & bit)
3146
0
      mpz_mul (tr, tr, base);
3147
0
    if (tr->_mp_size > mn)
3148
0
      {
3149
0
        mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3150
0
        tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3151
0
      }
3152
0
    bit >>= 1;
3153
0
  }
3154
0
      while (bit > 0);
3155
0
    }
3156
3157
  /* Final reduction */
3158
0
  if (tr->_mp_size >= mn)
3159
0
    {
3160
0
      minv.shift = shift;
3161
0
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3162
0
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3163
0
    }
3164
0
  if (tp)
3165
0
    gmp_free (tp);
3166
3167
0
  mpz_swap (r, tr);
3168
0
  mpz_clear (tr);
3169
0
  mpz_clear (base);
3170
0
}
3171
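mpz_powm is a left-to-right binary exponentiation: square on every exponent bit, multiply by the pre-reduced base when the bit is set, and reduce with the precomputed divisor inverse whenever the accumulator outgrows the modulus. A small Fermat-style sketch (hypothetical driver):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t b, e, m, r;
  mpz_init_set_ui (b, 2);
  mpz_init_set_ui (e, 560);     /* 561 - 1; 561 = 3*11*17 is a Carmichael number */
  mpz_init_set_ui (m, 561);
  mpz_init (r);

  mpz_powm (r, b, e, m);        /* 2^560 mod 561 == 1 even though 561 is composite */
  printf ("2^560 mod 561 = %lu\n", mpz_get_ui (r));

  mpz_clear (b); mpz_clear (e); mpz_clear (m); mpz_clear (r);
  return 0;
}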
3172
void
3173
mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
3174
0
{
3175
0
  mpz_t e;
3176
3177
0
  mpz_init_set_ui (e, elimb);
3178
0
  mpz_powm (r, b, e, m);
3179
0
  mpz_clear (e);
3180
0
}
3181
3182
/* x=trunc(y^(1/z)), r=y-x^z */
3183
void
3184
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
3185
0
{
3186
0
  int sgn;
3187
0
  mpz_t t, u;
3188
3189
0
  sgn = y->_mp_size < 0;
3190
0
  if ((~z & sgn) != 0)
3191
0
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
3192
0
  if (z == 0)
3193
0
    gmp_die ("mpz_rootrem: Zeroth root.");
3194
3195
0
  if (mpz_cmpabs_ui (y, 1) <= 0) {
3196
0
    if (x)
3197
0
      mpz_set (x, y);
3198
0
    if (r)
3199
0
      r->_mp_size = 0;
3200
0
    return;
3201
0
  }
3202
3203
0
  mpz_init (u);
3204
0
  mpz_init (t);
3205
0
  mpz_setbit (t, mpz_sizeinbase (y, 2) / z + 1);
3206
3207
0
  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
3208
0
    do {
3209
0
      mpz_swap (u, t);      /* u = x */
3210
0
      mpz_tdiv_q (t, y, u);   /* t = y/x */
3211
0
      mpz_add (t, t, u);    /* t = y/x + x */
3212
0
      mpz_tdiv_q_2exp (t, t, 1);  /* x'= (y/x + x)/2 */
3213
0
    } while (mpz_cmpabs (t, u) < 0);  /* |x'| < |x| */
3214
0
  else /* z != 2 */ {
3215
0
    mpz_t v;
3216
3217
0
    mpz_init (v);
3218
0
    if (sgn)
3219
0
      mpz_neg (t, t);
3220
3221
0
    do {
3222
0
      mpz_swap (u, t);      /* u = x */
3223
0
      mpz_pow_ui (t, u, z - 1);   /* t = x^(z-1) */
3224
0
      mpz_tdiv_q (t, y, t);   /* t = y/x^(z-1) */
3225
0
      mpz_mul_ui (v, u, z - 1);   /* v = x*(z-1) */
3226
0
      mpz_add (t, t, v);    /* t = y/x^(z-1) + x*(z-1) */
3227
0
      mpz_tdiv_q_ui (t, t, z);    /* x'=(y/x^(z-1) + x*(z-1))/z */
3228
0
    } while (mpz_cmpabs (t, u) < 0);  /* |x'| < |x| */
3229
3230
0
    mpz_clear (v);
3231
0
  }
3232
3233
0
  if (r) {
3234
0
    mpz_pow_ui (t, u, z);
3235
0
    mpz_sub (r, y, t);
3236
0
  }
3237
0
  if (x)
3238
0
    mpz_swap (x, u);
3239
0
  mpz_clear (u);
3240
0
  mpz_clear (t);
3241
0
}
3242
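The loop above is Newton's iteration x' = (y/x^(z-1) + (z-1)*x) / z, started from a power of two slightly above the true root and stopped once the iterate no longer decreases. Sketch (assumed driver, not part of the library):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t x, r, y;
  mpz_init (x);
  mpz_init (r);
  mpz_init_set_ui (y, 1000);

  mpz_rootrem (x, r, y, 3);     /* floor(cbrt(1000)) = 10, remainder 0 */
  printf ("cbrt(1000): x=%lu r=%lu\n", mpz_get_ui (x), mpz_get_ui (r));

  mpz_set_ui (y, 1001);
  mpz_rootrem (x, r, y, 3);     /* x = 10, r = 1001 - 10^3 = 1 */
  printf ("cbrt(1001): x=%lu r=%lu\n", mpz_get_ui (x), mpz_get_ui (r));

  mpz_clear (x); mpz_clear (r); mpz_clear (y);
  return 0;
}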
3243
int
3244
mpz_root (mpz_t x, const mpz_t y, unsigned long z)
3245
0
{
3246
0
  int res;
3247
0
  mpz_t r;
3248
3249
0
  mpz_init (r);
3250
0
  mpz_rootrem (x, r, y, z);
3251
0
  res = r->_mp_size == 0;
3252
0
  mpz_clear (r);
3253
3254
0
  return res;
3255
0
}
3256
3257
/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3258
void
3259
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
3260
0
{
3261
0
  mpz_rootrem (s, r, u, 2);
3262
0
}
3263
3264
void
3265
mpz_sqrt (mpz_t s, const mpz_t u)
3266
0
{
3267
0
  mpz_rootrem (s, NULL, u, 2);
3268
0
}
3269
3270
int
3271
mpz_perfect_square_p (const mpz_t u)
3272
0
{
3273
0
  if (u->_mp_size <= 0)
3274
0
    return (u->_mp_size == 0);
3275
0
  else
3276
0
    return mpz_root (NULL, u, 2);
3277
0
}
3278
3279
int
3280
mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
3281
0
{
3282
0
  mpz_t t;
3283
3284
0
  assert (n > 0);
3285
0
  assert (p [n-1] != 0);
3286
0
  return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2);
3287
0
}
3288
3289
mp_size_t
3290
mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
3291
0
{
3292
0
  mpz_t s, r, u;
3293
0
  mp_size_t res;
3294
3295
0
  assert (n > 0);
3296
0
  assert (p [n-1] != 0);
3297
3298
0
  mpz_init (r);
3299
0
  mpz_init (s);
3300
0
  mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2);
3301
3302
0
  assert (s->_mp_size == (n+1)/2);
3303
0
  mpn_copyd (sp, s->_mp_d, s->_mp_size);
3304
0
  mpz_clear (s);
3305
0
  res = r->_mp_size;
3306
0
  if (rp)
3307
0
    mpn_copyd (rp, r->_mp_d, res);
3308
0
  mpz_clear (r);
3309
0
  return res;
3310
0
}
3311

3312
/* Combinatorics */
3313
3314
void
3315
mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m)
3316
0
{
3317
0
  mpz_set_ui (x, n + (n == 0));
3318
0
  if (m + 1 < 2) return;
3319
0
  while (n > m + 1)
3320
0
    mpz_mul_ui (x, x, n -= m);
3321
0
}
3322
3323
void
3324
mpz_2fac_ui (mpz_t x, unsigned long n)
3325
0
{
3326
0
  mpz_mfac_uiui (x, n, 2);
3327
0
}
3328
3329
void
3330
mpz_fac_ui (mpz_t x, unsigned long n)
3331
0
{
3332
0
  mpz_mfac_uiui (x, n, 1);
3333
0
}
3334
3335
void
3336
mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
3337
0
{
3338
0
  mpz_t t;
3339
3340
0
  mpz_set_ui (r, k <= n);
3341
3342
0
  if (k > (n >> 1))
3343
0
    k = (k <= n) ? n - k : 0;
3344
3345
0
  mpz_init (t);
3346
0
  mpz_fac_ui (t, k);
3347
3348
0
  for (; k > 0; --k)
3349
0
    mpz_mul_ui (r, r, n--);
3350
3351
0
  mpz_divexact (r, r, t);
3352
0
  mpz_clear (t);
3353
0
}
3354
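mpz_bin_uiui first replaces k by n-k when k is above n/2, then multiplies the k largest factors n*(n-1)*...*(n-k+1) and divides out k! exactly. Sketch (hypothetical driver):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t f, b;
  mpz_init (f);
  mpz_init (b);

  mpz_fac_ui (f, 10);           /* 10! = 3628800 */
  mpz_bin_uiui (b, 10, 3);      /* C(10, 3) = 120 */

  printf ("10! = %lu, C(10,3) = %lu\n", mpz_get_ui (f), mpz_get_ui (b));

  mpz_clear (f); mpz_clear (b);
  return 0;
}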
3355

3356
/* Primality testing */
3357
3358
/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3359
/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3360
static int
3361
gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b)
3362
0
{
3363
0
  int c, bit = 0;
3364
3365
0
  assert (b & 1);
3366
0
  assert (a != 0);
3367
  /* assert (mpn_gcd_11 (a, b) == 1); */
3368
3369
  /* Below, we represent a and b shifted right so that the least
3370
     significant one bit is implicit. */
3371
0
  b >>= 1;
3372
3373
0
  gmp_ctz(c, a);
3374
0
  a >>= 1;
3375
3376
0
  for (;;)
3377
0
    {
3378
0
      a >>= c;
3379
      /* (2/b) = -1 if b = 3 or 5 mod 8 */
3380
0
      bit ^= c & (b ^ (b >> 1));
3381
0
      if (a < b)
3382
0
  {
3383
0
    if (a == 0)
3384
0
      return bit & 1 ? -1 : 1;
3385
0
    bit ^= a & b;
3386
0
    a = b - a;
3387
0
    b -= a;
3388
0
  }
3389
0
      else
3390
0
  {
3391
0
    a -= b;
3392
0
    assert (a != 0);
3393
0
  }
3394
3395
0
      gmp_ctz(c, a);
3396
0
      ++c;
3397
0
    }
3398
0
}
3399
3400
static void
3401
gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n)
3402
0
{
3403
0
  mpz_mod (Qk, Qk, n);
3404
  /* V_{2k} <- V_k ^ 2 - 2Q^k */
3405
0
  mpz_mul (V, V, V);
3406
0
  mpz_submul_ui (V, Qk, 2);
3407
0
  mpz_tdiv_r (V, V, n);
3408
  /* Q^{2k} = (Q^k)^2 */
3409
0
  mpz_mul (Qk, Qk, Qk);
3410
0
}
3411
3412
/* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3413
/* with P=1, Q=Q; k = (n>>b0)|1. */
3414
/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3415
/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3416
static int
3417
gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q,
3418
         mp_bitcnt_t b0, const mpz_t n)
3419
0
{
3420
0
  mp_bitcnt_t bs;
3421
0
  mpz_t U;
3422
0
  int res;
3423
3424
0
  assert (b0 > 0);
3425
0
  assert (Q <= - (LONG_MIN / 2));
3426
0
  assert (Q >= - (LONG_MAX / 2));
3427
0
  assert (mpz_cmp_ui (n, 4) > 0);
3428
0
  assert (mpz_odd_p (n));
3429
3430
0
  mpz_init_set_ui (U, 1); /* U1 = 1 */
3431
0
  mpz_set_ui (V, 1); /* V1 = 1 */
3432
0
  mpz_set_si (Qk, Q);
3433
3434
0
  for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;)
3435
0
    {
3436
      /* U_{2k} <- U_k * V_k */
3437
0
      mpz_mul (U, U, V);
3438
      /* V_{2k} <- V_k ^ 2 - 2Q^k */
3439
      /* Q^{2k} = (Q^k)^2 */
3440
0
      gmp_lucas_step_k_2k (V, Qk, n);
3441
3442
      /* A step k->k+1 is performed if the bit in $n$ is 1  */
3443
      /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but  */
3444
      /* should be 1 in $n+1$ (bs == b0)      */
3445
0
      if (b0 == bs || mpz_tstbit (n, bs))
3446
0
  {
3447
    /* Q^{k+1} <- Q^k * Q */
3448
0
    mpz_mul_si (Qk, Qk, Q);
3449
    /* U_{k+1} <- (U_k + V_k) / 2 */
3450
0
    mpz_swap (U, V); /* Keep in V the old value of U_k */
3451
0
    mpz_add (U, U, V);
3452
    /* We have to compute U/2, so we need an even value, */
3453
    /* equivalent (mod n) */
3454
0
    if (mpz_odd_p (U))
3455
0
      mpz_add (U, U, n);
3456
0
    mpz_tdiv_q_2exp (U, U, 1);
3457
    /* V_{k+1} <-(D*U_k + V_k) / 2 =
3458
      U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3459
0
    mpz_mul_si (V, V, -2*Q);
3460
0
    mpz_add (V, U, V);
3461
0
    mpz_tdiv_r (V, V, n);
3462
0
  }
3463
0
      mpz_tdiv_r (U, U, n);
3464
0
    }
3465
3466
0
  res = U->_mp_size == 0;
3467
0
  mpz_clear (U);
3468
0
  return res;
3469
0
}
3470
3471
/* Performs strong Lucas' test on x, with parameters suggested */
3472
/* for the BPSW test. Qk is only passed to recycle a variable. */
3473
/* Requires GCD (x,6) = 1.*/
3474
static int
3475
gmp_stronglucas (const mpz_t x, mpz_t Qk)
3476
0
{
3477
0
  mp_bitcnt_t b0;
3478
0
  mpz_t V, n;
3479
0
  mp_limb_t maxD, D; /* The absolute value is stored. */
3480
0
  long Q;
3481
0
  mp_limb_t tl;
3482
3483
  /* Test on the absolute value. */
3484
0
  mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size));
3485
3486
0
  assert (mpz_odd_p (n));
3487
  /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3488
0
  if (mpz_root (Qk, n, 2))
3489
0
    return 0; /* A square is composite. */
3490
3491
  /* Check Ds up to square root (in case n is prime)
3492
     or avoid overflows */
3493
0
  maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX;
3494
3495
0
  D = 3;
3496
  /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3497
  /* For those Ds we have (D/n) = (n/|D|) */
3498
0
  do
3499
0
    {
3500
0
      if (D >= maxD)
3501
0
  return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */
3502
0
      D += 2;
3503
0
      tl = mpz_tdiv_ui (n, D);
3504
0
      if (tl == 0)
3505
0
  return 0;
3506
0
    }
3507
0
  while (gmp_jacobi_coprime (tl, D) == 1);
3508
3509
0
  mpz_init (V);
3510
3511
  /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3512
0
  b0 = mpz_scan0 (n, 0);
3513
3514
  /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3515
0
  Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2);
3516
3517
0
  if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */
3518
0
    while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */
3519
      /* V <- V ^ 2 - 2Q^k */
3520
      /* Q^{2k} = (Q^k)^2 */
3521
0
      gmp_lucas_step_k_2k (V, Qk, n);
3522
3523
0
  mpz_clear (V);
3524
0
  return (b0 != 0);
3525
0
}
3526
3527
static int
3528
gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
3529
     const mpz_t q, mp_bitcnt_t k)
3530
0
{
3531
0
  assert (k > 0);
3532
3533
  /* Caller must initialize y to the base. */
3534
0
  mpz_powm (y, y, q, n);
3535
3536
0
  if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
3537
0
    return 1;
3538
3539
0
  while (--k > 0)
3540
0
    {
3541
0
      mpz_powm_ui (y, y, 2, n);
3542
0
      if (mpz_cmp (y, nm1) == 0)
3543
0
  return 1;
3544
      /* y == 1 means that the previous y was a non-trivial square root
3545
   of 1 (mod n). y == 0 means that n is a power of the base.
3546
   In either case, n is not prime. */
3547
0
      if (mpz_cmp_ui (y, 1) <= 0)
3548
0
  return 0;
3549
0
    }
3550
0
  return 0;
3551
0
}
3552
3553
/* This product is 0xc0cfd797, and fits in 32 bits. */
3554
#define GMP_PRIME_PRODUCT \
3555
0
  (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3556
3557
/* Bit (p+1)/2 is set, for each odd prime <= 61 */
3558
0
#define GMP_PRIME_MASK 0xc96996dcUL
3559
3560
int
3561
mpz_probab_prime_p (const mpz_t n, int reps)
3562
0
{
3563
0
  mpz_t nm1;
3564
0
  mpz_t q;
3565
0
  mpz_t y;
3566
0
  mp_bitcnt_t k;
3567
0
  int is_prime;
3568
0
  int j;
3569
3570
  /* Note that we use the absolute value of n only, for compatibility
3571
     with the real GMP. */
3572
0
  if (mpz_even_p (n))
3573
0
    return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;
3574
3575
  /* Above test excludes n == 0 */
3576
0
  assert (n->_mp_size != 0);
3577
3578
0
  if (mpz_cmpabs_ui (n, 64) < 0)
3579
0
    return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;
3580
3581
0
  if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
3582
0
    return 0;
3583
3584
  /* All prime factors are >= 31. */
3585
0
  if (mpz_cmpabs_ui (n, 31*31) < 0)
3586
0
    return 2;
3587
3588
0
  mpz_init (nm1);
3589
0
  mpz_init (q);
3590
3591
  /* Find q and k, where q is odd and n = 1 + 2**k * q.  */
3592
0
  mpz_abs (nm1, n);
3593
0
  nm1->_mp_d[0] -= 1;
3594
0
  k = mpz_scan1 (nm1, 0);
3595
0
  mpz_tdiv_q_2exp (q, nm1, k);
3596
3597
  /* BPSW test */
3598
0
  mpz_init_set_ui (y, 2);
3599
0
  is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y);
3600
0
  reps -= 24; /* skip the first 24 repetitions */
3601
3602
  /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3603
     j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3604
     if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3605
     30 (a[30] == 971 > 31*31 == 961). */
3606
3607
0
  for (j = 0; is_prime & (j < reps); j++)
3608
0
    {
3609
0
      mpz_set_ui (y, (unsigned long) j*j+j+41);
3610
0
      if (mpz_cmp (y, nm1) >= 0)
3611
0
  {
3612
    /* Don't try any further bases. This "early" break does not affect
3613
       the result for any reasonable reps value (<=5000 was tested) */
3614
0
    assert (j >= 30);
3615
0
    break;
3616
0
  }
3617
0
      is_prime = gmp_millerrabin (n, nm1, y, q, k);
3618
0
    }
3619
0
  mpz_clear (nm1);
3620
0
  mpz_clear (q);
3621
0
  mpz_clear (y);
3622
3623
0
  return is_prime;
3624
0
}
3625
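The return convention mirrors GMP: 2 means certainly prime (the small-number branches above), 1 means the number passed the BPSW test plus any requested extra Miller-Rabin rounds, 0 means composite. Sketch (assumed driver, not part of mini-gmp.c):

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t n;
  mpz_init (n);

  mpz_set_ui (n, 61);
  printf ("61:  %d\n", mpz_probab_prime_p (n, 25));   /* 2: below the small-prime cutoff */

  mpz_set_str (n, "2305843009213693951", 10);         /* 2^61 - 1, a Mersenne prime */
  printf ("M61: %d\n", mpz_probab_prime_p (n, 25));   /* 1: probably prime */

  mpz_set_ui (n, 561);
  printf ("561: %d\n", mpz_probab_prime_p (n, 25));   /* 0: shares a factor with the prime product */

  mpz_clear (n);
  return 0;
}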
3626

3627
/* Logical operations and bit manipulation. */
3628
3629
/* Numbers are treated as if represented in two's complement (and
3630
   infinitely sign extended). For negative values we get the two's
3631
   complement from -x = ~x + 1, where ~ is bitwise complement.
3632
   Negation transforms
3633
3634
     xxxx10...0
3635
3636
   into
3637
3638
     yyyy10...0
3639
3640
   where yyyy is the bitwise complement of xxxx. So least significant
3641
   bits, up to and including the first one bit, are unchanged, and
3642
   the more significant bits are all complemented.
3643
3644
   To change a bit from zero to one in a negative number, subtract the
3645
   corresponding power of two from the absolute value. This can never
3646
   underflow. To change a bit from one to zero, add the corresponding
3647
   power of two, and this might overflow. E.g., if x = -001111, the
3648
   two's complement is 110001. Clearing the least significant bit, we
3649
   get two's complement 110000, and -010000. */
3650
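A short sketch (hypothetical driver, not part of mini-gmp.c) of what that sign convention means for the bit operations that follow: negative numbers behave as if they had infinitely many leading one bits, and clearing the lowest set bit matches the -001111 example above.

#include <stdio.h>
#include "mini-gmp.h"

int main (void)
{
  mpz_t x;
  mpz_init_set_si (x, -15);     /* ...11110001 in two's complement */

  printf ("bit 0:  %d\n", mpz_tstbit (x, 0));    /* 1 */
  printf ("bit 1:  %d\n", mpz_tstbit (x, 1));    /* 0 */
  printf ("bit 99: %d\n", mpz_tstbit (x, 99));   /* 1: sign extension */

  mpz_clrbit (x, 0);            /* clear the lowest one bit: -15 -> -16 */
  printf ("after clrbit(0): %ld\n", mpz_get_si (x));

  mpz_clear (x);
  return 0;
}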
3651
int
3652
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
3653
0
{
3654
0
  mp_size_t limb_index;
3655
0
  unsigned shift;
3656
0
  mp_size_t ds;
3657
0
  mp_size_t dn;
3658
0
  mp_limb_t w;
3659
0
  int bit;
3660
3661
0
  ds = d->_mp_size;
3662
0
  dn = GMP_ABS (ds);
3663
0
  limb_index = bit_index / GMP_LIMB_BITS;
3664
0
  if (limb_index >= dn)
3665
0
    return ds < 0;
3666
3667
0
  shift = bit_index % GMP_LIMB_BITS;
3668
0
  w = d->_mp_d[limb_index];
3669
0
  bit = (w >> shift) & 1;
3670
3671
0
  if (ds < 0)
3672
0
    {
3673
      /* d < 0. Check if any of the bits below is set: If so, our bit
3674
   must be complemented. */
3675
0
      if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0)
3676
0
  return bit ^ 1;
3677
0
      while (--limb_index >= 0)
3678
0
  if (d->_mp_d[limb_index] > 0)
3679
0
    return bit ^ 1;
3680
0
    }
3681
0
  return bit;
3682
0
}
3683
3684
static void
3685
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
3686
0
{
3687
0
  mp_size_t dn, limb_index;
3688
0
  mp_limb_t bit;
3689
0
  mp_ptr dp;
3690
3691
0
  dn = GMP_ABS (d->_mp_size);
3692
3693
0
  limb_index = bit_index / GMP_LIMB_BITS;
3694
0
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3695
3696
0
  if (limb_index >= dn)
3697
0
    {
3698
0
      mp_size_t i;
3699
      /* The bit should be set outside of the end of the number.
3700
   We have to increase the size of the number. */
3701
0
      dp = MPZ_REALLOC (d, limb_index + 1);
3702
3703
0
      dp[limb_index] = bit;
3704
0
      for (i = dn; i < limb_index; i++)
3705
0
  dp[i] = 0;
3706
0
      dn = limb_index + 1;
3707
0
    }
3708
0
  else
3709
0
    {
3710
0
      mp_limb_t cy;
3711
3712
0
      dp = d->_mp_d;
3713
3714
0
      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
3715
0
      if (cy > 0)
3716
0
  {
3717
0
    dp = MPZ_REALLOC (d, dn + 1);
3718
0
    dp[dn++] = cy;
3719
0
  }
3720
0
    }
3721
3722
0
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3723
0
}
3724
3725
static void
3726
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
3727
0
{
3728
0
  mp_size_t dn, limb_index;
3729
0
  mp_ptr dp;
3730
0
  mp_limb_t bit;
3731
3732
0
  dn = GMP_ABS (d->_mp_size);
3733
0
  dp = d->_mp_d;
3734
3735
0
  limb_index = bit_index / GMP_LIMB_BITS;
3736
0
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3737
3738
0
  assert (limb_index < dn);
3739
3740
0
  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
3741
0
         dn - limb_index, bit));
3742
0
  dn = mpn_normalized_size (dp, dn);
3743
0
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3744
0
}
3745
3746
void
3747
mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3748
0
{
3749
0
  if (!mpz_tstbit (d, bit_index))
3750
0
    {
3751
0
      if (d->_mp_size >= 0)
3752
0
  mpz_abs_add_bit (d, bit_index);
3753
0
      else
3754
0
  mpz_abs_sub_bit (d, bit_index);
3755
0
    }
3756
0
}
3757
3758
void
3759
mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3760
0
{
3761
0
  if (mpz_tstbit (d, bit_index))
3762
0
    {
3763
0
      if (d->_mp_size >= 0)
3764
0
  mpz_abs_sub_bit (d, bit_index);
3765
0
      else
3766
0
  mpz_abs_add_bit (d, bit_index);
3767
0
    }
3768
0
}
3769
3770
void
3771
mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3772
0
{
3773
0
  if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3774
0
    mpz_abs_sub_bit (d, bit_index);
3775
0
  else
3776
0
    mpz_abs_add_bit (d, bit_index);
3777
0
}
3778
3779
void
3780
mpz_com (mpz_t r, const mpz_t u)
3781
0
{
3782
0
  mpz_add_ui (r, u, 1);
3783
0
  mpz_neg (r, r);
3784
0
}
3785
3786
void
3787
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
3788
0
{
3789
0
  mp_size_t un, vn, rn, i;
3790
0
  mp_ptr up, vp, rp;
3791
3792
0
  mp_limb_t ux, vx, rx;
3793
0
  mp_limb_t uc, vc, rc;
3794
0
  mp_limb_t ul, vl, rl;
3795
3796
0
  un = GMP_ABS (u->_mp_size);
3797
0
  vn = GMP_ABS (v->_mp_size);
3798
0
  if (un < vn)
3799
0
    {
3800
0
      MPZ_SRCPTR_SWAP (u, v);
3801
0
      MP_SIZE_T_SWAP (un, vn);
3802
0
    }
3803
0
  if (vn == 0)
3804
0
    {
3805
0
      r->_mp_size = 0;
3806
0
      return;
3807
0
    }
3808
3809
0
  uc = u->_mp_size < 0;
3810
0
  vc = v->_mp_size < 0;
3811
0
  rc = uc & vc;
3812
3813
0
  ux = -uc;
3814
0
  vx = -vc;
3815
0
  rx = -rc;
3816
3817
  /* If the smaller input is positive, higher limbs don't matter. */
3818
0
  rn = vx ? un : vn;
3819
3820
0
  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
3821
3822
0
  up = u->_mp_d;
3823
0
  vp = v->_mp_d;
3824
3825
0
  i = 0;
3826
0
  do
3827
0
    {
3828
0
      ul = (up[i] ^ ux) + uc;
3829
0
      uc = ul < uc;
3830
3831
0
      vl = (vp[i] ^ vx) + vc;
3832
0
      vc = vl < vc;
3833
3834
0
      rl = ( (ul & vl) ^ rx) + rc;
3835
0
      rc = rl < rc;
3836
0
      rp[i] = rl;
3837
0
    }
3838
0
  while (++i < vn);
3839
0
  assert (vc == 0);
3840
3841
0
  for (; i < rn; i++)
3842
0
    {
3843
0
      ul = (up[i] ^ ux) + uc;
3844
0
      uc = ul < uc;
3845
3846
0
      rl = ( (ul & vx) ^ rx) + rc;
3847
0
      rc = rl < rc;
3848
0
      rp[i] = rl;
3849
0
    }
3850
0
  if (rc)
3851
0
    rp[rn++] = rc;
3852
0
  else
3853
0
    rn = mpn_normalized_size (rp, rn);
3854
3855
0
  r->_mp_size = rx ? -rn : rn;
3856
0
}
3857
3858
void
3859
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
3860
0
{
3861
0
  mp_size_t un, vn, rn, i;
3862
0
  mp_ptr up, vp, rp;
3863
3864
0
  mp_limb_t ux, vx, rx;
3865
0
  mp_limb_t uc, vc, rc;
3866
0
  mp_limb_t ul, vl, rl;
3867
3868
0
  un = GMP_ABS (u->_mp_size);
3869
0
  vn = GMP_ABS (v->_mp_size);
3870
0
  if (un < vn)
3871
0
    {
3872
0
      MPZ_SRCPTR_SWAP (u, v);
3873
0
      MP_SIZE_T_SWAP (un, vn);
3874
0
    }
3875
0
  if (vn == 0)
3876
0
    {
3877
0
      mpz_set (r, u);
3878
0
      return;
3879
0
    }
3880
3881
0
  uc = u->_mp_size < 0;
3882
0
  vc = v->_mp_size < 0;
3883
0
  rc = uc | vc;
3884
3885
0
  ux = -uc;
3886
0
  vx = -vc;
3887
0
  rx = -rc;
3888
3889
  /* If the smaller input is negative, by sign extension higher limbs
3890
     don't matter. */
3891
0
  rn = vx ? vn : un;
3892
3893
0
  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
3894
3895
0
  up = u->_mp_d;
3896
0
  vp = v->_mp_d;
3897
3898
0
  i = 0;
3899
0
  do
3900
0
    {
3901
0
      ul = (up[i] ^ ux) + uc;
3902
0
      uc = ul < uc;
3903
3904
0
      vl = (vp[i] ^ vx) + vc;
3905
0
      vc = vl < vc;
3906
3907
0
      rl = ( (ul | vl) ^ rx) + rc;
3908
0
      rc = rl < rc;
3909
0
      rp[i] = rl;
3910
0
    }
3911
0
  while (++i < vn);
3912
0
  assert (vc == 0);
3913
3914
0
  for (; i < rn; i++)
3915
0
    {
3916
0
      ul = (up[i] ^ ux) + uc;
3917
0
      uc = ul < uc;
3918
3919
0
      rl = ( (ul | vx) ^ rx) + rc;
3920
0
      rc = rl < rc;
3921
0
      rp[i] = rl;
3922
0
    }
3923
0
  if (rc)
3924
0
    rp[rn++] = rc;
3925
0
  else
3926
0
    rn = mpn_normalized_size (rp, rn);
3927
3928
0
  r->_mp_size = rx ? -rn : rn;
3929
0
}
3930
3931
void
3932
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
3933
0
{
3934
0
  mp_size_t un, vn, i;
3935
0
  mp_ptr up, vp, rp;
3936
3937
0
  mp_limb_t ux, vx, rx;
3938
0
  mp_limb_t uc, vc, rc;
3939
0
  mp_limb_t ul, vl, rl;
3940
3941
0
  un = GMP_ABS (u->_mp_size);
3942
0
  vn = GMP_ABS (v->_mp_size);
3943
0
  if (un < vn)
3944
0
    {
3945
0
      MPZ_SRCPTR_SWAP (u, v);
3946
0
      MP_SIZE_T_SWAP (un, vn);
3947
0
    }
3948
0
  if (vn == 0)
3949
0
    {
3950
0
      mpz_set (r, u);
3951
0
      return;
3952
0
    }
3953
3954
0
  uc = u->_mp_size < 0;
3955
0
  vc = v->_mp_size < 0;
3956
0
  rc = uc ^ vc;
3957
3958
0
  ux = -uc;
3959
0
  vx = -vc;
3960
0
  rx = -rc;
3961
3962
0
  rp = MPZ_REALLOC (r, un + (mp_size_t) rc);
3963
3964
0
  up = u->_mp_d;
3965
0
  vp = v->_mp_d;
3966
3967
0
  i = 0;
3968
0
  do
3969
0
    {
3970
0
      ul = (up[i] ^ ux) + uc;
3971
0
      uc = ul < uc;
3972
3973
0
      vl = (vp[i] ^ vx) + vc;
3974
0
      vc = vl < vc;
3975
3976
0
      rl = (ul ^ vl ^ rx) + rc;
3977
0
      rc = rl < rc;
3978
0
      rp[i] = rl;
3979
0
    }
3980
0
  while (++i < vn);
3981
0
  assert (vc == 0);
3982
3983
0
  for (; i < un; i++)
3984
0
    {
3985
0
      ul = (up[i] ^ ux) + uc;
3986
0
      uc = ul < uc;
3987
3988
0
      rl = (ul ^ ux) + rc;
3989
0
      rc = rl < rc;
3990
0
      rp[i] = rl;
3991
0
    }
3992
0
  if (rc)
3993
0
    rp[un++] = rc;
3994
0
  else
3995
0
    un = mpn_normalized_size (rp, un);
3996
3997
0
  r->_mp_size = rx ? -un : un;
3998
0
}
3999
4000
static unsigned
4001
gmp_popcount_limb (mp_limb_t x)
4002
0
{
4003
0
  unsigned c;
4004
4005
  /* Do 16 bits at a time, to avoid limb-sized constants. */
4006
0
  int LOCAL_SHIFT_BITS = 16;
4007
0
  for (c = 0; x > 0;)
4008
0
    {
4009
0
      unsigned w = x - ((x >> 1) & 0x5555);
4010
0
      w = ((w >> 2) & 0x3333) + (w & 0x3333);
4011
0
      w =  (w >> 4) + w;
4012
0
      w = ((w >> 8) & 0x000f) + (w & 0x000f);
4013
0
      c += w;
4014
0
      if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)
4015
0
  x >>= LOCAL_SHIFT_BITS;
4016
0
      else
4017
0
  x = 0;
4018
0
    }
4019
0
  return c;
4020
0
}
4021
4022
mp_bitcnt_t
4023
mpn_popcount (mp_srcptr p, mp_size_t n)
4024
0
{
4025
0
  mp_size_t i;
4026
0
  mp_bitcnt_t c;
4027
4028
0
  for (c = 0, i = 0; i < n; i++)
4029
0
    c += gmp_popcount_limb (p[i]);
4030
4031
0
  return c;
4032
0
}
4033
4034
mp_bitcnt_t
4035
mpz_popcount (const mpz_t u)
4036
0
{
4037
0
  mp_size_t un;
4038
4039
0
  un = u->_mp_size;
4040
4041
0
  if (un < 0)
4042
0
    return ~(mp_bitcnt_t) 0;
4043
4044
0
  return mpn_popcount (u->_mp_d, un);
4045
0
}
4046
4047
mp_bitcnt_t
4048
mpz_hamdist (const mpz_t u, const mpz_t v)
4049
0
{
4050
0
  mp_size_t un, vn, i;
4051
0
  mp_limb_t uc, vc, ul, vl, comp;
4052
0
  mp_srcptr up, vp;
4053
0
  mp_bitcnt_t c;
4054
4055
0
  un = u->_mp_size;
4056
0
  vn = v->_mp_size;
4057
4058
0
  if ( (un ^ vn) < 0)
4059
0
    return ~(mp_bitcnt_t) 0;
4060
4061
0
  comp = - (uc = vc = (un < 0));
4062
0
  if (uc)
4063
0
    {
4064
0
      assert (vn < 0);
4065
0
      un = -un;
4066
0
      vn = -vn;
4067
0
    }
4068
4069
0
  up = u->_mp_d;
4070
0
  vp = v->_mp_d;
4071
4072
0
  if (un < vn)
4073
0
    MPN_SRCPTR_SWAP (up, un, vp, vn);
4074
4075
0
  for (i = 0, c = 0; i < vn; i++)
4076
0
    {
4077
0
      ul = (up[i] ^ comp) + uc;
4078
0
      uc = ul < uc;
4079
4080
0
      vl = (vp[i] ^ comp) + vc;
4081
0
      vc = vl < vc;
4082
4083
0
      c += gmp_popcount_limb (ul ^ vl);
4084
0
    }
4085
0
  assert (vc == 0);
4086
4087
0
  for (; i < un; i++)
4088
0
    {
4089
0
      ul = (up[i] ^ comp) + uc;
4090
0
      uc = ul < uc;
4091
4092
0
      c += gmp_popcount_limb (ul ^ comp);
4093
0
    }
4094
4095
0
  return c;
4096
0
}
4097
4098
mp_bitcnt_t
4099
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
4100
0
{
4101
0
  mp_ptr up;
4102
0
  mp_size_t us, un, i;
4103
0
  mp_limb_t limb, ux;
4104
4105
0
  us = u->_mp_size;
4106
0
  un = GMP_ABS (us);
4107
0
  i = starting_bit / GMP_LIMB_BITS;
4108
4109
  /* Past the end there are no 1 bits for u>=0, or an immediate 1 bit
4110
     for u<0. Notice this test picks up any u==0 too. */
4111
0
  if (i >= un)
4112
0
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);
4113
4114
0
  up = u->_mp_d;
4115
0
  ux = 0;
4116
0
  limb = up[i];
4117
4118
0
  if (starting_bit != 0)
4119
0
    {
4120
0
      if (us < 0)
4121
0
  {
4122
0
    ux = mpn_zero_p (up, i);
4123
0
    limb = ~ limb + ux;
4124
0
    ux = - (mp_limb_t) (limb >= ux);
4125
0
  }
4126
4127
      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
4128
0
      limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
4129
0
    }
4130
4131
0
  return mpn_common_scan (limb, i, up, un, ux);
4132
0
}
4133
4134
mp_bitcnt_t
4135
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
4136
0
{
4137
0
  mp_ptr up;
4138
0
  mp_size_t us, un, i;
4139
0
  mp_limb_t limb, ux;
4140
4141
0
  us = u->_mp_size;
4142
0
  ux = - (mp_limb_t) (us >= 0);
4143
0
  un = GMP_ABS (us);
4144
0
  i = starting_bit / GMP_LIMB_BITS;
4145
4146
  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
4147
     u<0.  Notice this test picks up all cases of u==0 too. */
4148
0
  if (i >= un)
4149
0
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);
4150
4151
0
  up = u->_mp_d;
4152
0
  limb = up[i] ^ ux;
4153
4154
0
  if (ux == 0)
4155
0
    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */
4156
4157
  /* Mask all bits before starting_bit, thus ignoring them. */
4158
0
  limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
4159
4160
0
  return mpn_common_scan (limb, i, up, un, ux);
4161
0
}
4162
4163

4164
/* MPZ base conversion. */
4165
4166
size_t
4167
mpz_sizeinbase (const mpz_t u, int base)
4168
3.68k
{
4169
3.68k
  mp_size_t un;
4170
3.68k
  mp_srcptr up;
4171
3.68k
  mp_ptr tp;
4172
3.68k
  mp_bitcnt_t bits;
4173
3.68k
  struct gmp_div_inverse bi;
4174
3.68k
  size_t ndigits;
4175
4176
3.68k
  assert (base >= 2);
4177
3.68k
  assert (base <= 62);
4178
4179
3.68k
  un = GMP_ABS (u->_mp_size);
4180
3.68k
  if (un == 0)
4181
12
    return 1;
4182
4183
3.67k
  up = u->_mp_d;
4184
4185
3.67k
  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
4186
3.67k
  switch (base)
4187
3.67k
    {
4188
0
    case 2:
4189
0
      return bits;
4190
0
    case 4:
4191
0
      return (bits + 1) / 2;
4192
0
    case 8:
4193
0
      return (bits + 2) / 3;
4194
0
    case 16:
4195
0
      return (bits + 3) / 4;
4196
0
    case 32:
4197
0
      return (bits + 4) / 5;
4198
      /* FIXME: Do something more clever for the common case of base
4199
   10. */
4200
3.67k
    }
4201
4202
3.67k
  tp = gmp_xalloc_limbs (un);
4203
3.67k
  mpn_copyi (tp, up, un);
4204
3.67k
  mpn_div_qr_1_invert (&bi, base);
4205
4206
3.67k
  ndigits = 0;
4207
3.67k
  do
4208
348k
    {
4209
348k
      ndigits++;
4210
348k
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
4211
348k
      un -= (tp[un-1] == 0);
4212
348k
    }
4213
348k
  while (un > 0);
4214
4215
3.67k
  gmp_free (tp);
4216
3.67k
  return ndigits;
4217
3.67k
}
4218
4219
char *
4220
mpz_get_str (char *sp, int base, const mpz_t u)
4221
3.68k
{
4222
3.68k
  unsigned bits;
4223
3.68k
  const char *digits;
4224
3.68k
  mp_size_t un;
4225
3.68k
  size_t i, sn;
4226
4227
3.68k
  digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
4228
3.68k
  if (base > 1)
4229
3.68k
    {
4230
3.68k
      if (base <= 36)
4231
3.68k
  digits = "0123456789abcdefghijklmnopqrstuvwxyz";
4232
0
      else if (base > 62)
4233
0
  return NULL;
4234
3.68k
    }
4235
0
  else if (base >= -1)
4236
0
    base = 10;
4237
0
  else
4238
0
    {
4239
0
      base = -base;
4240
0
      if (base > 36)
4241
0
  return NULL;
4242
0
    }
4243
4244
3.68k
  sn = 1 + mpz_sizeinbase (u, base);
4245
3.68k
  if (!sp)
4246
3.68k
    sp = (char *) gmp_xalloc (1 + sn);
4247
4248
3.68k
  un = GMP_ABS (u->_mp_size);
4249
4250
3.68k
  if (un == 0)
4251
12
    {
4252
12
      sp[0] = '0';
4253
12
      sp[1] = '\0';
4254
12
      return sp;
4255
12
    }
4256
4257
3.67k
  i = 0;
4258
4259
3.67k
  if (u->_mp_size < 0)
4260
0
    sp[i++] = '-';
4261
4262
3.67k
  bits = mpn_base_power_of_two_p (base);
4263
4264
3.67k
  if (bits)
4265
    /* Not modified in this case. */
4266
0
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
4267
3.67k
  else
4268
3.67k
    {
4269
3.67k
      struct mpn_base_info info;
4270
3.67k
      mp_ptr tp;
4271
4272
3.67k
      mpn_get_base_info (&info, base);
4273
3.67k
      tp = gmp_xalloc_limbs (un);
4274
3.67k
      mpn_copyi (tp, u->_mp_d, un);
4275
4276
3.67k
      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
4277
3.67k
      gmp_free (tp);
4278
3.67k
    }
4279
4280
352k
  for (; i < sn; i++)
4281
348k
    sp[i] = digits[(unsigned char) sp[i]];
4282
4283
3.67k
  sp[sn] = '\0';
4284
3.67k
  return sp;
4285
3.68k
}
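A short sketch of the allocating form of mpz_get_str (NULL buffer). The free() call assumes the default allocator; if custom allocators were installed with mp_set_memory_functions, the matching free function must be used instead. print_hex is an illustrative name:

#include <stdio.h>
#include <stdlib.h>
#include "mini-gmp.h"

static void print_hex (const mpz_t x)
{
  /* With a NULL buffer, mpz_get_str allocates the string itself. */
  char *s = mpz_get_str (NULL, 16, x);
  printf ("0x%s\n", s);
  free (s);   /* valid only under the default allocator */
}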
4286
4287
int
4288
mpz_set_str (mpz_t r, const char *sp, int base)
4289
5.83k
{
4290
5.83k
  unsigned bits, value_of_a;
4291
5.83k
  mp_size_t rn, alloc;
4292
5.83k
  mp_ptr rp;
4293
5.83k
  size_t dn;
4294
5.83k
  int sign;
4295
5.83k
  unsigned char *dp;
4296
4297
5.83k
  assert (base == 0 || (base >= 2 && base <= 62));
4298
4299
5.83k
  while (isspace( (unsigned char) *sp))
4300
0
    sp++;
4301
4302
5.83k
  sign = (*sp == '-');
4303
5.83k
  sp += sign;
4304
4305
5.83k
  if (base == 0)
4306
5.83k
    {
4307
5.83k
      if (sp[0] == '0')
4308
529
  {
4309
529
    if (sp[1] == 'x' || sp[1] == 'X')
4310
0
      {
4311
0
        base = 16;
4312
0
        sp += 2;
4313
0
      }
4314
529
    else if (sp[1] == 'b' || sp[1] == 'B')
4315
0
      {
4316
0
        base = 2;
4317
0
        sp += 2;
4318
0
      }
4319
529
    else
4320
529
      base = 8;
4321
529
  }
4322
5.30k
      else
4323
5.30k
  base = 10;
4324
5.83k
    }
4325
4326
5.83k
  if (!*sp)
4327
0
    {
4328
0
      r->_mp_size = 0;
4329
0
      return -1;
4330
0
    }
4331
5.83k
  dp = (unsigned char *) gmp_xalloc (strlen (sp));
4332
4333
5.83k
  value_of_a = (base > 36) ? 36 : 10;
4334
393k
  for (dn = 0; *sp; sp++)
4335
387k
    {
4336
387k
      unsigned digit;
4337
4338
387k
      if (isspace ((unsigned char) *sp))
4339
0
  continue;
4340
387k
      else if (*sp >= '0' && *sp <= '9')
4341
387k
  digit = *sp - '0';
4342
0
      else if (*sp >= 'a' && *sp <= 'z')
4343
0
  digit = *sp - 'a' + value_of_a;
4344
0
      else if (*sp >= 'A' && *sp <= 'Z')
4345
0
  digit = *sp - 'A' + 10;
4346
0
      else
4347
0
  digit = base; /* fail */
4348
4349
387k
      if (digit >= (unsigned) base)
4350
0
  {
4351
0
    gmp_free (dp);
4352
0
    r->_mp_size = 0;
4353
0
    return -1;
4354
0
  }
4355
4356
387k
      dp[dn++] = digit;
4357
387k
    }
4358
4359
5.83k
  if (!dn)
4360
0
    {
4361
0
      gmp_free (dp);
4362
0
      r->_mp_size = 0;
4363
0
      return -1;
4364
0
    }
4365
5.83k
  bits = mpn_base_power_of_two_p (base);
4366
4367
5.83k
  if (bits > 0)
4368
529
    {
4369
529
      alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4370
529
      rp = MPZ_REALLOC (r, alloc);
4371
529
      rn = mpn_set_str_bits (rp, dp, dn, bits);
4372
529
    }
4373
5.30k
  else
4374
5.30k
    {
4375
5.30k
      struct mpn_base_info info;
4376
5.30k
      mpn_get_base_info (&info, base);
4377
5.30k
      alloc = (dn + info.exp - 1) / info.exp;
4378
5.30k
      rp = MPZ_REALLOC (r, alloc);
4379
5.30k
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
4380
      /* Normalization, needed for all-zero input. */
4381
5.30k
      assert (rn > 0);
4382
5.30k
      rn -= rp[rn-1] == 0;
4383
5.30k
    }
4384
5.83k
  assert (rn <= alloc);
4385
5.83k
  gmp_free (dp);
4386
4387
5.83k
  r->_mp_size = sign ? - rn : rn;
4388
4389
5.83k
  return 0;
4390
5.83k
}
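A sketch of the base-0 auto-detection implemented above: a leading "0x"/"0X" selects base 16, "0b"/"0B" selects base 2, a bare leading "0" selects base 8, and anything else is decimal; the return value is 0 on success and -1 on a parse failure (parse_demo is an illustrative helper):

#include <assert.h>
#include "mini-gmp.h"

static void parse_demo (void)
{
  mpz_t a, b;
  mpz_init (a);
  mpz_init (b);
  /* Base 0: the prefix selects the radix, as in the parser above. */
  if (mpz_set_str (a, "0xff", 0) == 0)       /* hex    -> 255 */
    assert (mpz_cmp_ui (a, 255) == 0);
  if (mpz_set_str (b, "-0b1010", 0) == 0)    /* binary -> -10 */
    assert (mpz_cmp_si (b, -10) == 0);
  mpz_clear (a);
  mpz_clear (b);
}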
4391
4392
int
4393
mpz_init_set_str (mpz_t r, const char *sp, int base)
4394
4.11k
{
4395
4.11k
  mpz_init (r);
4396
4.11k
  return mpz_set_str (r, sp, base);
4397
4.11k
}
4398
4399
size_t
4400
mpz_out_str (FILE *stream, int base, const mpz_t x)
4401
0
{
4402
0
  char *str;
4403
0
  size_t len;
4404
4405
0
  str = mpz_get_str (NULL, base, x);
4406
0
  len = strlen (str);
4407
0
  len = fwrite (str, 1, len, stream);
4408
0
  gmp_free (str);
4409
0
  return len;
4410
0
}
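A minimal use of mpz_out_str, which simply converts with mpz_get_str and hands the bytes to fwrite; it returns the number of bytes written, or 0 on a write error (out_demo is an illustrative name):

#include <stdio.h>
#include "mini-gmp.h"

static void out_demo (const mpz_t x)
{
  size_t n = mpz_out_str (stdout, 10, x);
  if (n == 0)
    fputs ("write failed\n", stderr);
}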
4411
4412

4413
static int
4414
gmp_detect_endian (void)
4415
0
{
4416
0
  static const int i = 2;
4417
0
  const unsigned char *p = (const unsigned char *) &i;
4418
0
  return 1 - *p;
4419
0
}
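A worked note on the detection trick above: the constant 2 is inspected through its first byte in memory, so the function yields -1 on little-endian hosts (first byte 0x02) and 1 on big-endian hosts (first byte 0x00), matching the endian convention of mpz_import/mpz_export below. The same idea as a standalone sketch:

#include <stdio.h>

int main (void)
{
  const int i = 2;
  const unsigned char *p = (const unsigned char *) &i;
  /* Little-endian: *p == 2, prints -1.  Big-endian: *p == 0, prints 1. */
  printf ("%d\n", 1 - *p);
  return 0;
}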
4420
4421
/* Import and export. Does not support nails. */
4422
void
4423
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
4424
      size_t nails, const void *src)
4425
0
{
4426
0
  const unsigned char *p;
4427
0
  ptrdiff_t word_step;
4428
0
  mp_ptr rp;
4429
0
  mp_size_t rn;
4430
4431
  /* The current (partial) limb. */
4432
0
  mp_limb_t limb;
4433
  /* The number of bytes already copied to this limb (starting from
4434
     the low end). */
4435
0
  size_t bytes;
4436
  /* The index where the limb should be stored, when completed. */
4437
0
  mp_size_t i;
4438
4439
0
  if (nails != 0)
4440
0
    gmp_die ("mpz_import: Nails not supported.");
4441
4442
0
  assert (order == 1 || order == -1);
4443
0
  assert (endian >= -1 && endian <= 1);
4444
4445
0
  if (endian == 0)
4446
0
    endian = gmp_detect_endian ();
4447
4448
0
  p = (unsigned char *) src;
4449
4450
0
  word_step = (order != endian) ? 2 * size : 0;
4451
4452
  /* Process bytes from the least significant end, so point p at the
4453
     least significant word. */
4454
0
  if (order == 1)
4455
0
    {
4456
0
      p += size * (count - 1);
4457
0
      word_step = - word_step;
4458
0
    }
4459
4460
  /* And at the least significant byte of that word. */
4461
0
  if (endian == 1)
4462
0
    p += (size - 1);
4463
4464
0
  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
4465
0
  rp = MPZ_REALLOC (r, rn);
4466
4467
0
  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
4468
0
    {
4469
0
      size_t j;
4470
0
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4471
0
  {
4472
0
    limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
4473
0
    if (bytes == sizeof(mp_limb_t))
4474
0
      {
4475
0
        rp[i++] = limb;
4476
0
        bytes = 0;
4477
0
        limb = 0;
4478
0
      }
4479
0
  }
4480
0
    }
4481
0
  assert (i + (bytes > 0) == rn);
4482
0
  if (limb != 0)
4483
0
    rp[i++] = limb;
4484
0
  else
4485
0
    i = mpn_normalized_size (rp, i);
4486
4487
0
  r->_mp_size = i;
4488
0
}
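A sketch of importing a big-endian byte string with the parameter meanings used above: count words of size bytes each, order = 1 for most-significant word first, endian = 1 for big-endian within a word, and nails must be 0 in mini-gmp (import_demo is an illustrative helper):

#include <assert.h>
#include "mini-gmp.h"

static void import_demo (void)
{
  /* 0x01020304 as a big-endian byte string. */
  static const unsigned char bytes[4] = { 0x01, 0x02, 0x03, 0x04 };
  mpz_t x;
  mpz_init (x);
  /* count = 4 one-byte words, most significant first; the per-word
     endian setting is irrelevant for size == 1. */
  mpz_import (x, 4, 1, 1, 1, 0, bytes);
  assert (mpz_cmp_ui (x, 0x01020304UL) == 0);
  mpz_clear (x);
}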
4489
4490
void *
4491
mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
4492
      size_t nails, const mpz_t u)
4493
0
{
4494
0
  size_t count;
4495
0
  mp_size_t un;
4496
4497
0
  if (nails != 0)
4498
0
    gmp_die ("mpz_import: Nails not supported.");
4499
4500
0
  assert (order == 1 || order == -1);
4501
0
  assert (endian >= -1 && endian <= 1);
4502
0
  assert (size > 0 || u->_mp_size == 0);
4503
4504
0
  un = u->_mp_size;
4505
0
  count = 0;
4506
0
  if (un != 0)
4507
0
    {
4508
0
      size_t k;
4509
0
      unsigned char *p;
4510
0
      ptrdiff_t word_step;
4511
      /* The current (partial) limb. */
4512
0
      mp_limb_t limb;
4513
      /* The number of bytes left to do in this limb. */
4514
0
      size_t bytes;
4515
      /* The index where the limb was read. */
4516
0
      mp_size_t i;
4517
4518
0
      un = GMP_ABS (un);
4519
4520
      /* Count bytes in top limb. */
4521
0
      limb = u->_mp_d[un-1];
4522
0
      assert (limb != 0);
4523
4524
0
      k = (GMP_LIMB_BITS <= CHAR_BIT);
4525
0
      if (!k)
4526
0
  {
4527
0
    do {
4528
0
      int LOCAL_CHAR_BIT = CHAR_BIT;
4529
0
      k++; limb >>= LOCAL_CHAR_BIT;
4530
0
    } while (limb != 0);
4531
0
  }
4532
      /* else limb = 0; */
4533
4534
0
      count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
4535
4536
0
      if (!r)
4537
0
  r = gmp_xalloc (count * size);
4538
4539
0
      if (endian == 0)
4540
0
  endian = gmp_detect_endian ();
4541
4542
0
      p = (unsigned char *) r;
4543
4544
0
      word_step = (order != endian) ? 2 * size : 0;
4545
4546
      /* Process bytes from the least significant end, so point p at the
4547
   least significant word. */
4548
0
      if (order == 1)
4549
0
  {
4550
0
    p += size * (count - 1);
4551
0
    word_step = - word_step;
4552
0
  }
4553
4554
      /* And at the least significant byte of that word. */
4555
0
      if (endian == 1)
4556
0
  p += (size - 1);
4557
4558
0
      for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4559
0
  {
4560
0
    size_t j;
4561
0
    for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian)
4562
0
      {
4563
0
        if (sizeof (mp_limb_t) == 1)
4564
0
    {
4565
0
      if (i < un)
4566
0
        *p = u->_mp_d[i++];
4567
0
      else
4568
0
        *p = 0;
4569
0
    }
4570
0
        else
4571
0
    {
4572
0
      int LOCAL_CHAR_BIT = CHAR_BIT;
4573
0
      if (bytes == 0)
4574
0
        {
4575
0
          if (i < un)
4576
0
      limb = u->_mp_d[i++];
4577
0
          bytes = sizeof (mp_limb_t);
4578
0
        }
4579
0
      *p = limb;
4580
0
      limb >>= LOCAL_CHAR_BIT;
4581
0
      bytes--;
4582
0
    }
4583
0
      }
4584
0
  }
4585
0
      assert (i == un);
4586
0
      assert (k == count);
4587
0
    }
4588
4589
0
  if (countp)
4590
0
    *countp = count;
4591
4592
0
  return r;
4593
0
}
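A round-trip sketch for mpz_export: with r == NULL the output buffer is allocated by the function and, under the default allocator, can be released with free(); *countp receives the number of words produced (export_demo is an illustrative helper):

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "mini-gmp.h"

static void export_demo (void)
{
  static const unsigned char ref[4] = { 0x01, 0x02, 0x03, 0x04 };
  mpz_t x;
  size_t count;
  unsigned char *out;

  mpz_init_set_ui (x, 0x01020304UL);
  /* Export as big-endian bytes, most significant word first. */
  out = mpz_export (NULL, &count, 1, 1, 1, 0, x);
  assert (count == 4 && memcmp (out, ref, 4) == 0);
  free (out);   /* valid only under the default allocator */
  mpz_clear (x);
}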