Coverage Report

Created: 2023-02-22 06:14

/src/nettle-with-mini-gmp/mini-gmp.c
Line
Count
Source
1
/* mini-gmp, a minimalistic implementation of a GNU GMP subset.
2
3
   Contributed to the GNU project by Niels Möller
4
5
Copyright 1991-1997, 1999-2020 Free Software Foundation, Inc.
6
7
This file is part of the GNU MP Library.
8
9
The GNU MP Library is free software; you can redistribute it and/or modify
10
it under the terms of either:
11
12
  * the GNU Lesser General Public License as published by the Free
13
    Software Foundation; either version 3 of the License, or (at your
14
    option) any later version.
15
16
or
17
18
  * the GNU General Public License as published by the Free Software
19
    Foundation; either version 2 of the License, or (at your option) any
20
    later version.
21
22
or both in parallel, as here.
23
24
The GNU MP Library is distributed in the hope that it will be useful, but
25
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
27
for more details.
28
29
You should have received copies of the GNU General Public License and the
30
GNU Lesser General Public License along with the GNU MP Library.  If not,
31
see https://www.gnu.org/licenses/.  */
32
33
/* NOTE: All functions in this file which are not declared in
34
   mini-gmp.h are internal, and are not intended to be compatible
35
   with GMP or with future versions of mini-gmp. */
36
37
/* Much of the material is copied from GMP files, including: gmp-impl.h,
38
   longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39
   mpn/generic/lshift.c, mpn/generic/mul_1.c,
40
   mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41
   mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42
   mpn/generic/submul_1.c. */
43
44
#include <assert.h>
45
#include <ctype.h>
46
#include <limits.h>
47
#include <stdio.h>
48
#include <stdlib.h>
49
#include <string.h>
50
51
#include "mini-gmp.h"
52
53
#if !defined(MINI_GMP_DONT_USE_FLOAT_H)
54
#include <float.h>
55
#endif
56
57

58
/* Macros */
59
2.43G
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
60
61
51.8k
#define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
62
51.0k
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
63
64
675M
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
65
660M
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
66
67
220M
#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
68
0
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))
69
70
54.5k
#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
71
0
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
72
73
0
#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
74
13.9k
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
75
76
7.39k
#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
77
78
#if defined(DBL_MANT_DIG) && FLT_RADIX == 2
79
0
#define GMP_DBL_MANT_BITS DBL_MANT_DIG
80
#else
81
#define GMP_DBL_MANT_BITS (53)
82
#endif
83
84
/* Return non-zero if xp,xsize and yp,ysize overlap.
85
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
86
   overlap.  If both these are false, there's an overlap. */
87
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize)       \
88
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
89
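The macro is the comment above written in symbols; restated in set notation (assuming xsize and ysize are positive), the half-open limb ranges intersect exactly when

  $[\,xp,\ xp+xsize\,) \cap [\,yp,\ yp+ysize\,) \ne \emptyset \iff xp+xsize > yp \ \wedge\ yp+ysize > xp.$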
90
3.61k
#define gmp_assert_nocarry(x) do { \
91
3.61k
    mp_limb_t __cy = (x);    \
92
3.61k
    assert (__cy == 0);      \
93
3.61k
  } while (0)
94
95
15.4k
#define gmp_clz(count, x) do {           \
96
15.4k
    mp_limb_t __clz_x = (x);            \
97
15.4k
    unsigned __clz_c = 0;           \
98
15.4k
    int LOCAL_SHIFT_BITS = 8;           \
99
15.4k
    if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)       \
100
15.4k
      for (;                \
101
72.1k
     (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
102
56.6k
     __clz_c += 8)           \
103
56.6k
  { __clz_x <<= LOCAL_SHIFT_BITS; }        \
104
51.0k
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++)   \
105
35.6k
      __clz_x <<= 1;             \
106
15.4k
    (count) = __clz_c;              \
107
15.4k
  } while (0)
108
109
0
#define gmp_ctz(count, x) do {           \
110
0
    mp_limb_t __ctz_x = (x);            \
111
0
    unsigned __ctz_c = 0;           \
112
0
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x);        \
113
0
    (count) = GMP_LIMB_BITS - 1 - __ctz_c;       \
114
0
  } while (0)
115
116
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
117
1.49M
  do {                 \
118
1.49M
    mp_limb_t __x;              \
119
1.49M
    __x = (al) + (bl);              \
120
1.49M
    (sh) = (ah) + (bh) + (__x < (al));          \
121
1.49M
    (sl) = __x;               \
122
1.49M
  } while (0)
123
124
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
125
21.8k
  do {                 \
126
21.8k
    mp_limb_t __x;              \
127
21.8k
    __x = (al) - (bl);              \
128
21.8k
    (sh) = (ah) - (bh) - ((al) < (bl));         \
129
21.8k
    (sl) = __x;               \
130
21.8k
  } while (0)
131
132
#define gmp_umul_ppmm(w1, w0, u, v)         \
133
220M
  do {                 \
134
220M
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;       \
135
220M
    if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS)   \
136
220M
      {                 \
137
0
  unsigned int __ww = (unsigned int) (u) * (v);     \
138
0
  w0 = (mp_limb_t) __ww;            \
139
0
  w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);     \
140
0
      }                  \
141
220M
    else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS)     \
142
220M
      {                 \
143
0
  unsigned long int __ww = (unsigned long int) (u) * (v);   \
144
0
  w0 = (mp_limb_t) __ww;            \
145
0
  w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);     \
146
0
      }                  \
147
220M
    else {               \
148
220M
      mp_limb_t __x0, __x1, __x2, __x3;         \
149
220M
      unsigned __ul, __vl, __uh, __vh;          \
150
220M
      mp_limb_t __u = (u), __v = (v);         \
151
220M
                  \
152
220M
      __ul = __u & GMP_LLIMB_MASK;         \
153
220M
      __uh = __u >> (GMP_LIMB_BITS / 2);        \
154
220M
      __vl = __v & GMP_LLIMB_MASK;         \
155
220M
      __vh = __v >> (GMP_LIMB_BITS / 2);        \
156
220M
                  \
157
220M
      __x0 = (mp_limb_t) __ul * __vl;         \
158
220M
      __x1 = (mp_limb_t) __ul * __vh;         \
159
220M
      __x2 = (mp_limb_t) __uh * __vl;         \
160
220M
      __x3 = (mp_limb_t) __uh * __vh;         \
161
220M
                  \
162
220M
      __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
163
220M
      __x1 += __x2;   /* but this indeed can */   \
164
220M
      if (__x1 < __x2)   /* did we get it? */      \
165
220M
  __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */  \
166
220M
                  \
167
220M
      (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2));     \
168
220M
      (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK);  \
169
220M
    }                 \
170
220M
  } while (0)
171
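Illustrative aside, not part of mini-gmp: the fallback branch of gmp_umul_ppmm is plain schoolbook multiplication on half-limbs. The sketch below replays the same steps on 32-bit "limbs" split into 16-bit halves and checks the result against the native 64-bit product; the names and widths are chosen only for this demo.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t u = 0xdeadbeefu, v = 0xfeedf00du;
  uint32_t ul = u & 0xffff, uh = u >> 16;    /* split u into half-limbs */
  uint32_t vl = v & 0xffff, vh = v >> 16;    /* split v into half-limbs */

  uint32_t x0 = ul * vl;                     /* low  x low  */
  uint32_t x1 = ul * vh;                     /* low  x high */
  uint32_t x2 = uh * vl;                     /* high x low  */
  uint32_t x3 = uh * vh;                     /* high x high */

  x1 += x0 >> 16;                            /* cannot carry */
  x1 += x2;                                  /* may carry ... */
  if (x1 < x2)
    x3 += 1u << 16;                          /* ... propagate it into the high word */

  uint32_t w1 = x3 + (x1 >> 16);             /* high product word */
  uint32_t w0 = (x1 << 16) + (x0 & 0xffff);  /* low product word */

  uint64_t ref = (uint64_t) u * v;
  assert (w1 == (uint32_t) (ref >> 32) && w0 == (uint32_t) ref);
  printf ("%08x%08x\n", (unsigned) w1, (unsigned) w0);
  return 0;
}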
172
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di)      \
173
1.47M
  do {                 \
174
1.47M
    mp_limb_t _qh, _ql, _r, _mask;          \
175
1.47M
    gmp_umul_ppmm (_qh, _ql, (nh), (di));        \
176
1.47M
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));    \
177
1.47M
    _r = (nl) - _qh * (d);            \
178
1.47M
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */   \
179
1.47M
    _qh += _mask;             \
180
1.47M
    _r += _mask & (d);              \
181
1.47M
    if (_r >= (d))             \
182
1.47M
      {                 \
183
16
  _r -= (d);              \
184
16
  _qh++;                \
185
16
      }                  \
186
1.47M
                  \
187
1.47M
    (r) = _r;               \
188
1.47M
    (q) = _qh;                \
189
1.47M
  } while (0)
190
191
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)   \
192
10.9k
  do {                 \
193
10.9k
    mp_limb_t _q0, _t1, _t0, _mask;         \
194
10.9k
    gmp_umul_ppmm ((q), _q0, (n2), (dinv));        \
195
10.9k
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));      \
196
10.9k
                  \
197
10.9k
    /* Compute the two most significant limbs of n - q'd */   \
198
10.9k
    (r1) = (n1) - (d1) * (q);           \
199
10.9k
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0));    \
200
10.9k
    gmp_umul_ppmm (_t1, _t0, (d0), (q));       \
201
10.9k
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);      \
202
10.9k
    (q)++;                \
203
10.9k
                  \
204
10.9k
    /* Conditionally adjust q and the remainders */     \
205
10.9k
    _mask = - (mp_limb_t) ((r1) >= _q0);        \
206
10.9k
    (q) += _mask;             \
207
10.9k
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
208
10.9k
    if ((r1) >= (d1))             \
209
10.9k
      {                 \
210
64
  if ((r1) > (d1) || (r0) >= (d0))       \
211
64
    {               \
212
0
      (q)++;              \
213
0
      gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));  \
214
0
    }                \
215
64
      }                  \
216
10.9k
  } while (0)
217
218
/* Swap macros. */
219
#define MP_LIMB_T_SWAP(x, y)            \
220
0
  do {                 \
221
0
    mp_limb_t __mp_limb_t_swap__tmp = (x);        \
222
0
    (x) = (y);                \
223
0
    (y) = __mp_limb_t_swap__tmp;          \
224
0
  } while (0)
225
#define MP_SIZE_T_SWAP(x, y)            \
226
8.02k
  do {                 \
227
8.02k
    mp_size_t __mp_size_t_swap__tmp = (x);        \
228
8.02k
    (x) = (y);                \
229
8.02k
    (y) = __mp_size_t_swap__tmp;          \
230
8.02k
  } while (0)
231
#define MP_BITCNT_T_SWAP(x,y)     \
232
0
  do {           \
233
0
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x);  \
234
0
    (x) = (y);          \
235
0
    (y) = __mp_bitcnt_t_swap__tmp;    \
236
0
  } while (0)
237
#define MP_PTR_SWAP(x, y)           \
238
3.89k
  do {                 \
239
3.89k
    mp_ptr __mp_ptr_swap__tmp = (x);          \
240
3.89k
    (x) = (y);                \
241
3.89k
    (y) = __mp_ptr_swap__tmp;           \
242
3.89k
  } while (0)
243
#define MP_SRCPTR_SWAP(x, y)            \
244
0
  do {                 \
245
0
    mp_srcptr __mp_srcptr_swap__tmp = (x);        \
246
0
    (x) = (y);                \
247
0
    (y) = __mp_srcptr_swap__tmp;          \
248
0
  } while (0)
249
250
#define MPN_PTR_SWAP(xp,xs, yp,ys)          \
251
  do {                  \
252
    MP_PTR_SWAP (xp, yp);           \
253
    MP_SIZE_T_SWAP (xs, ys);            \
254
  } while(0)
255
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys)         \
256
0
  do {                 \
257
0
    MP_SRCPTR_SWAP (xp, yp);            \
258
0
    MP_SIZE_T_SWAP (xs, ys);            \
259
0
  } while(0)
260
261
#define MPZ_PTR_SWAP(x, y)            \
262
0
  do {                 \
263
0
    mpz_ptr __mpz_ptr_swap__tmp = (x);          \
264
0
    (x) = (y);                \
265
0
    (y) = __mpz_ptr_swap__tmp;            \
266
0
  } while (0)
267
#define MPZ_SRCPTR_SWAP(x, y)           \
268
234
  do {                 \
269
234
    mpz_srcptr __mpz_srcptr_swap__tmp = (x);        \
270
234
    (x) = (y);                \
271
234
    (y) = __mpz_srcptr_swap__tmp;         \
272
234
  } while (0)
273
274
const int mp_bits_per_limb = GMP_LIMB_BITS;
275
276

277
/* Memory allocation and other helper functions. */
278
static void
279
gmp_die (const char *msg)
280
0
{
281
0
  fprintf (stderr, "%s\n", msg);
282
0
  abort();
283
0
}
284
285
static void *
286
gmp_default_alloc (size_t size)
287
38.5k
{
288
38.5k
  void *p;
289
290
38.5k
  assert (size > 0);
291
292
38.5k
  p = malloc (size);
293
38.5k
  if (!p)
294
0
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");
295
296
38.5k
  return p;
297
38.5k
}
298
299
static void *
300
gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
301
1.11k
{
302
1.11k
  void * p;
303
304
1.11k
  p = realloc (old, new_size);
305
306
1.11k
  if (!p)
307
0
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");
308
309
1.11k
  return p;
310
1.11k
}
311
312
static void
313
gmp_default_free (void *p, size_t unused_size)
314
35.0k
{
315
35.0k
  free (p);
316
35.0k
}
317
318
static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
319
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
320
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
321
322
void
323
mp_get_memory_functions (void *(**alloc_func) (size_t),
324
       void *(**realloc_func) (void *, size_t, size_t),
325
       void (**free_func) (void *, size_t))
326
11.6k
{
327
11.6k
  if (alloc_func)
328
5.84k
    *alloc_func = gmp_allocate_func;
329
330
11.6k
  if (realloc_func)
331
0
    *realloc_func = gmp_reallocate_func;
332
333
11.6k
  if (free_func)
334
5.84k
    *free_func = gmp_free_func;
335
11.6k
}
336
337
void
338
mp_set_memory_functions (void *(*alloc_func) (size_t),
339
       void *(*realloc_func) (void *, size_t, size_t),
340
       void (*free_func) (void *, size_t))
341
0
{
342
0
  if (!alloc_func)
343
0
    alloc_func = gmp_default_alloc;
344
0
  if (!realloc_func)
345
0
    realloc_func = gmp_default_realloc;
346
0
  if (!free_func)
347
0
    free_func = gmp_default_free;
348
349
0
  gmp_allocate_func = alloc_func;
350
0
  gmp_reallocate_func = realloc_func;
351
0
  gmp_free_func = free_func;
352
0
}
353
354
32.7k
#define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
355
29.2k
#define gmp_free(p) ((*gmp_free_func) ((p), 0))
356
357
static mp_ptr
358
gmp_xalloc_limbs (mp_size_t size)
359
24.2k
{
360
24.2k
  return (mp_ptr) gmp_xalloc (size * sizeof (mp_limb_t));
361
24.2k
}
362
363
static mp_ptr
364
gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
365
1.11k
{
366
1.11k
  assert (size > 0);
367
1.11k
  return (mp_ptr) (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
368
1.11k
}
369
370

371
/* MPN interface */
372
373
void
374
mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
375
18.2k
{
376
18.2k
  mp_size_t i;
377
123k
  for (i = 0; i < n; i++)
378
105k
    d[i] = s[i];
379
18.2k
}
380
381
void
382
mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
383
235
{
384
2.11k
  while (--n >= 0)
385
1.88k
    d[n] = s[n];
386
235
}
387
388
int
389
mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
390
3.83k
{
391
6.85k
  while (--n >= 0)
392
6.32k
    {
393
6.32k
      if (ap[n] != bp[n])
394
3.30k
  return ap[n] > bp[n] ? 1 : -1;
395
6.32k
    }
396
526
  return 0;
397
3.83k
}
398
399
static int
400
mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
401
2.76k
{
402
2.76k
  if (an != bn)
403
2.56k
    return an < bn ? -1 : 1;
404
209
  else
405
209
    return mpn_cmp (ap, bp, an);
406
2.76k
}
407
408
static mp_size_t
409
mpn_normalized_size (mp_srcptr xp, mp_size_t n)
410
14.8k
{
411
19.6k
  while (n > 0 && xp[n-1] == 0)
412
4.80k
    --n;
413
14.8k
  return n;
414
14.8k
}
415
416
int
417
mpn_zero_p(mp_srcptr rp, mp_size_t n)
418
1.71k
{
419
1.71k
  return mpn_normalized_size (rp, n) == 0;
420
1.71k
}
421
422
void
423
mpn_zero (mp_ptr rp, mp_size_t n)
424
8.05k
{
425
77.3k
  while (--n >= 0)
426
69.2k
    rp[n] = 0;
427
8.05k
}
428
429
mp_limb_t
430
mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
431
21.4k
{
432
21.4k
  mp_size_t i;
433
434
21.4k
  assert (n > 0);
435
21.4k
  i = 0;
436
21.4k
  do
437
255k
    {
438
255k
      mp_limb_t r = ap[i] + b;
439
      /* Carry out */
440
255k
      b = (r < b);
441
255k
      rp[i] = r;
442
255k
    }
443
255k
  while (++i < n);
444
445
21.4k
  return b;
446
21.4k
}
447
448
mp_limb_t
449
mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
450
912k
{
451
912k
  mp_size_t i;
452
912k
  mp_limb_t cy;
453
454
6.40M
  for (i = 0, cy = 0; i < n; i++)
455
5.49M
    {
456
5.49M
      mp_limb_t a, b, r;
457
5.49M
      a = ap[i]; b = bp[i];
458
5.49M
      r = a + cy;
459
5.49M
      cy = (r < cy);
460
5.49M
      r += b;
461
5.49M
      cy += (r < b);
462
5.49M
      rp[i] = r;
463
5.49M
    }
464
912k
  return cy;
465
912k
}
466
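A minimal sketch, not from the library, of the two-step carry detection used in mpn_add_n above, applied to a two-limb addition and compared against an unsigned __int128 reference; it assumes 64-bit limbs and a compiler such as GCC or Clang that provides __int128.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t a[2] = { UINT64_MAX, 1 };   /* value 1*2^64 + (2^64 - 1) */
  uint64_t b[2] = { 1, UINT64_MAX };   /* value (2^64 - 1)*2^64 + 1 */
  uint64_t r[2], cy = 0;

  for (int i = 0; i < 2; i++)
    {
      uint64_t s = a[i] + cy;          /* add incoming carry */
      cy = (s < cy);                   /* wrapped? */
      s += b[i];
      cy += (s < b[i]);                /* wrapped again? */
      r[i] = s;
    }

  unsigned __int128 ref = ((unsigned __int128) a[1] << 64 | a[0])
                        + ((unsigned __int128) b[1] << 64 | b[0]);
  assert (r[0] == (uint64_t) ref);
  assert (r[1] == (uint64_t) (ref >> 64));
  assert (cy == 1);                    /* the true sum needs a third limb */
  return 0;
}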
467
mp_limb_t
468
mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
469
1.44k
{
470
1.44k
  mp_limb_t cy;
471
472
1.44k
  assert (an >= bn);
473
474
1.44k
  cy = mpn_add_n (rp, ap, bp, bn);
475
1.44k
  if (an > bn)
476
1.28k
    cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
477
1.44k
  return cy;
478
1.44k
}
479
480
mp_limb_t
481
mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
482
2.56k
{
483
2.56k
  mp_size_t i;
484
485
2.56k
  assert (n > 0);
486
487
2.56k
  i = 0;
488
2.56k
  do
489
16.2k
    {
490
16.2k
      mp_limb_t a = ap[i];
491
      /* Carry out */
492
16.2k
      mp_limb_t cy = a < b;
493
16.2k
      rp[i] = a - b;
494
16.2k
      b = cy;
495
16.2k
    }
496
16.2k
  while (++i < n);
497
498
2.56k
  return b;
499
2.56k
}
500
501
mp_limb_t
502
mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
503
2.01M
{
504
2.01M
  mp_size_t i;
505
2.01M
  mp_limb_t cy;
506
507
14.2M
  for (i = 0, cy = 0; i < n; i++)
508
12.2M
    {
509
12.2M
      mp_limb_t a, b;
510
12.2M
      a = ap[i]; b = bp[i];
511
12.2M
      b += cy;
512
12.2M
      cy = (b < cy);
513
12.2M
      cy += (a < b);
514
12.2M
      rp[i] = a - b;
515
12.2M
    }
516
2.01M
  return cy;
517
2.01M
}
518
519
mp_limb_t
520
mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
521
2.76k
{
522
2.76k
  mp_limb_t cy;
523
524
2.76k
  assert (an >= bn);
525
526
2.76k
  cy = mpn_sub_n (rp, ap, bp, bn);
527
2.76k
  if (an > bn)
528
2.56k
    cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
529
2.76k
  return cy;
530
2.76k
}
531
532
mp_limb_t
533
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
534
5.42M
{
535
5.42M
  mp_limb_t ul, cl, hpl, lpl;
536
537
5.42M
  assert (n >= 1);
538
539
5.42M
  cl = 0;
540
5.42M
  do
541
32.6M
    {
542
32.6M
      ul = *up++;
543
32.6M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
544
545
32.6M
      lpl += cl;
546
32.6M
      cl = (lpl < cl) + hpl;
547
548
32.6M
      *rp++ = lpl;
549
32.6M
    }
550
32.6M
  while (--n != 0);
551
552
5.42M
  return cl;
553
5.42M
}
554
555
mp_limb_t
556
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
557
25.1M
{
558
25.1M
  mp_limb_t ul, cl, hpl, lpl, rl;
559
560
25.1M
  assert (n >= 1);
561
562
25.1M
  cl = 0;
563
25.1M
  do
564
175M
    {
565
175M
      ul = *up++;
566
175M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
567
568
175M
      lpl += cl;
569
175M
      cl = (lpl < cl) + hpl;
570
571
175M
      rl = *rp;
572
175M
      lpl = rl + lpl;
573
175M
      cl += lpl < rl;
574
175M
      *rp++ = lpl;
575
175M
    }
576
175M
  while (--n != 0);
577
578
25.1M
  return cl;
579
25.1M
}
580
581
mp_limb_t
582
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
583
1.64M
{
584
1.64M
  mp_limb_t ul, cl, hpl, lpl, rl;
585
586
1.64M
  assert (n >= 1);
587
588
1.64M
  cl = 0;
589
1.64M
  do
590
10.0M
    {
591
10.0M
      ul = *up++;
592
10.0M
      gmp_umul_ppmm (hpl, lpl, ul, vl);
593
594
10.0M
      lpl += cl;
595
10.0M
      cl = (lpl < cl) + hpl;
596
597
10.0M
      rl = *rp;
598
10.0M
      lpl = rl - lpl;
599
10.0M
      cl += lpl > rl;
600
10.0M
      *rp++ = lpl;
601
10.0M
    }
602
10.0M
  while (--n != 0);
603
604
1.64M
  return cl;
605
1.64M
}
606
607
mp_limb_t
608
mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
609
4.79M
{
610
4.79M
  assert (un >= vn);
611
4.79M
  assert (vn >= 1);
612
4.79M
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un));
613
4.79M
  assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn));
614
615
  /* We first multiply by the low order limb. This result can be
616
     stored in rp rather than added to it. We also avoid a loop for zeroing this
617
     way. */
618
619
4.79M
  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
620
621
  /* Now accumulate the product of up[] and the next higher limb from
622
     vp[]. */
623
624
28.7M
  while (--vn >= 1)
625
23.9M
    {
626
23.9M
      rp += 1, vp += 1;
627
23.9M
      rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
628
23.9M
    }
629
4.79M
  return rp[un];
630
4.79M
}
631
632
void
633
mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
634
2.21M
{
635
2.21M
  mpn_mul (rp, ap, n, bp, n);
636
2.21M
}
637
638
void
639
mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
640
2.58M
{
641
2.58M
  mpn_mul (rp, ap, n, ap, n);
642
2.58M
}
643
644
mp_limb_t
645
mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
646
327k
{
647
327k
  mp_limb_t high_limb, low_limb;
648
327k
  unsigned int tnc;
649
327k
  mp_limb_t retval;
650
651
327k
  assert (n >= 1);
652
327k
  assert (cnt >= 1);
653
327k
  assert (cnt < GMP_LIMB_BITS);
654
655
327k
  up += n;
656
327k
  rp += n;
657
658
327k
  tnc = GMP_LIMB_BITS - cnt;
659
327k
  low_limb = *--up;
660
327k
  retval = low_limb >> tnc;
661
327k
  high_limb = (low_limb << cnt);
662
663
1.09M
  while (--n != 0)
664
769k
    {
665
769k
      low_limb = *--up;
666
769k
      *--rp = high_limb | (low_limb >> tnc);
667
769k
      high_limb = (low_limb << cnt);
668
769k
    }
669
327k
  *--rp = high_limb;
670
671
327k
  return retval;
672
327k
}
673
674
mp_limb_t
675
mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
676
1.88M
{
677
1.88M
  mp_limb_t high_limb, low_limb;
678
1.88M
  unsigned int tnc;
679
1.88M
  mp_limb_t retval;
680
681
1.88M
  assert (n >= 1);
682
1.88M
  assert (cnt >= 1);
683
1.88M
  assert (cnt < GMP_LIMB_BITS);
684
685
1.88M
  tnc = GMP_LIMB_BITS - cnt;
686
1.88M
  high_limb = *up++;
687
1.88M
  retval = (high_limb << tnc);
688
1.88M
  low_limb = high_limb >> cnt;
689
690
12.4M
  while (--n != 0)
691
10.5M
    {
692
10.5M
      high_limb = *up++;
693
10.5M
      *rp++ = low_limb | (high_limb << tnc);
694
10.5M
      low_limb = high_limb >> cnt;
695
10.5M
    }
696
1.88M
  *rp = low_limb;
697
698
1.88M
  return retval;
699
1.88M
}
700
701
static mp_bitcnt_t
702
mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
703
     mp_limb_t ux)
704
0
{
705
0
  unsigned cnt;
706
707
0
  assert (ux == 0 || ux == GMP_LIMB_MAX);
708
0
  assert (0 <= i && i <= un );
709
710
0
  while (limb == 0)
711
0
    {
712
0
      i++;
713
0
      if (i == un)
714
0
  return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
715
0
      limb = ux ^ up[i];
716
0
    }
717
0
  gmp_ctz (cnt, limb);
718
0
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
719
0
}
720
721
mp_bitcnt_t
722
mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
723
0
{
724
0
  mp_size_t i;
725
0
  i = bit / GMP_LIMB_BITS;
726
727
0
  return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
728
0
        i, ptr, i, 0);
729
0
}
730
731
mp_bitcnt_t
732
mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
733
0
{
734
0
  mp_size_t i;
735
0
  i = bit / GMP_LIMB_BITS;
736
737
0
  return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
738
0
        i, ptr, i, GMP_LIMB_MAX);
739
0
}
740
741
void
742
mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n)
743
0
{
744
0
  while (--n >= 0)
745
0
    *rp++ = ~ *up++;
746
0
}
747
748
mp_limb_t
749
mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n)
750
0
{
751
0
  while (*up == 0)
752
0
    {
753
0
      *rp = 0;
754
0
      if (!--n)
755
0
  return 0;
756
0
      ++up; ++rp;
757
0
    }
758
0
  *rp = - *up;
759
0
  mpn_com (++rp, ++up, --n);
760
0
  return 1;
761
0
}
762
763

764
/* MPN division interface. */
765
766
/* The 3/2 inverse is defined as
767
768
     m = floor( (B^3-1) / (B u1 + u0)) - B
769
*/
770
mp_limb_t
771
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
772
11.9k
{
773
11.9k
  mp_limb_t r, m;
774
775
11.9k
  {
776
11.9k
    mp_limb_t p, ql;
777
11.9k
    unsigned ul, uh, qh;
778
779
    /* For notation, let b denote the half-limb base, so that B = b^2.
780
       Split u1 = b uh + ul. */
781
11.9k
    ul = u1 & GMP_LLIMB_MASK;
782
11.9k
    uh = u1 >> (GMP_LIMB_BITS / 2);
783
784
    /* Approximation of the high half of quotient. Differs from the 2/1
785
       inverse of the half limb uh, since we have already subtracted
786
       u0. */
787
11.9k
    qh = (u1 ^ GMP_LIMB_MAX) / uh;
788
789
    /* Adjust to get a half-limb 3/2 inverse, i.e., we want
790
791
       qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u)
792
     = floor( (b (~u) + b-1) / u),
793
794
       and the remainder
795
796
       r = b (~u) + b-1 - qh (b uh + ul)
797
       = b (~u - qh uh) + b-1 - qh ul
798
799
       Subtraction of qh ul may underflow, which implies adjustments.
800
       But by normalization, 2 u >= B > qh ul, so we need to adjust by
801
       at most 2.
802
    */
803
804
11.9k
    r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
805
806
11.9k
    p = (mp_limb_t) qh * ul;
807
    /* Adjustment steps taken from udiv_qrnnd_c */
808
11.9k
    if (r < p)
809
3.46k
      {
810
3.46k
  qh--;
811
3.46k
  r += u1;
812
3.46k
  if (r >= u1) /* i.e. we didn't get carry when adding to r */
813
3.46k
    if (r < p)
814
0
      {
815
0
        qh--;
816
0
        r += u1;
817
0
      }
818
3.46k
      }
819
11.9k
    r -= p;
820
821
    /* Low half of the quotient is
822
823
       ql = floor ( (b r + b-1) / u1).
824
825
       This is a 3/2 division (on half-limbs), for which qh is a
826
       suitable inverse. */
827
828
11.9k
    p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
829
    /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
830
       work, it is essential that ql is a full mp_limb_t. */
831
11.9k
    ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
832
833
    /* By the 3/2 trick, we don't need the high half limb. */
834
11.9k
    r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
835
836
11.9k
    if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2))))
837
0
      {
838
0
  ql--;
839
0
  r += u1;
840
0
      }
841
11.9k
    m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
842
11.9k
    if (r >= u1)
843
0
      {
844
0
  m++;
845
0
  r -= u1;
846
0
      }
847
11.9k
  }
848
849
  /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
850
     3/2 inverse. */
851
11.9k
  if (u0 > 0)
852
1.11k
    {
853
1.11k
      mp_limb_t th, tl;
854
1.11k
      r = ~r;
855
1.11k
      r += u0;
856
1.11k
      if (r < u0)
857
1.11k
  {
858
1.11k
    m--;
859
1.11k
    if (r >= u1)
860
0
      {
861
0
        m--;
862
0
        r -= u1;
863
0
      }
864
1.11k
    r -= u1;
865
1.11k
  }
866
1.11k
      gmp_umul_ppmm (th, tl, u0, m);
867
1.11k
      r += th;
868
1.11k
      if (r < th)
869
0
  {
870
0
    m--;
871
0
    m -= ((r > u1) | ((r == u1) & (tl > u0)));
872
0
  }
873
1.11k
    }
874
875
11.9k
  return m;
876
11.9k
}
877
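For readability, the defining relation from the comment before mpn_invert_3by2, written out in LaTeX (here $B = 2^{\mathrm{GMP\_LIMB\_BITS}}$ is the limb base and $u_1$ is normalized, i.e. its high bit is set):

  $m \;=\; \left\lfloor \dfrac{B^3 - 1}{B\,u_1 + u_0} \right\rfloor \;-\; B,$

that is, $m + B$ is the largest integer $q$ with $q\,(B u_1 + u_0) \le B^3 - 1$.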
878
struct gmp_div_inverse
879
{
880
  /* Normalization shift count. */
881
  unsigned shift;
882
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
883
  mp_limb_t d1, d0;
884
  /* Inverse, for 2/1 or 3/2. */
885
  mp_limb_t di;
886
};
887
888
static void
889
mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
890
10.5k
{
891
10.5k
  unsigned shift;
892
893
10.5k
  assert (d > 0);
894
10.5k
  gmp_clz (shift, d);
895
10.5k
  inv->shift = shift;
896
10.5k
  inv->d1 = d << shift;
897
10.5k
  inv->di = mpn_invert_limb (inv->d1);
898
10.5k
}
899
900
static void
901
mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
902
         mp_limb_t d1, mp_limb_t d0)
903
0
{
904
0
  unsigned shift;
905
906
0
  assert (d1 > 0);
907
0
  gmp_clz (shift, d1);
908
0
  inv->shift = shift;
909
0
  if (shift > 0)
910
0
    {
911
0
      d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
912
0
      d0 <<= shift;
913
0
    }
914
0
  inv->d1 = d1;
915
0
  inv->d0 = d0;
916
0
  inv->di = mpn_invert_3by2 (d1, d0);
917
0
}
918
919
static void
920
mpn_div_qr_invert (struct gmp_div_inverse *inv,
921
       mp_srcptr dp, mp_size_t dn)
922
1.40k
{
923
1.40k
  assert (dn > 0);
924
925
1.40k
  if (dn == 1)
926
0
    mpn_div_qr_1_invert (inv, dp[0]);
927
1.40k
  else if (dn == 2)
928
0
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
929
1.40k
  else
930
1.40k
    {
931
1.40k
      unsigned shift;
932
1.40k
      mp_limb_t d1, d0;
933
934
1.40k
      d1 = dp[dn-1];
935
1.40k
      d0 = dp[dn-2];
936
1.40k
      assert (d1 > 0);
937
1.40k
      gmp_clz (shift, d1);
938
1.40k
      inv->shift = shift;
939
1.40k
      if (shift > 0)
940
424
  {
941
424
    d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
942
424
    d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
943
424
  }
944
1.40k
      inv->d1 = d1;
945
1.40k
      inv->d0 = d0;
946
1.40k
      inv->di = mpn_invert_3by2 (d1, d0);
947
1.40k
    }
948
1.40k
}
949
950
/* Not matching current public gmp interface, rather corresponding to
951
   the sbpi1_div_* functions. */
952
static mp_limb_t
953
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
954
         const struct gmp_div_inverse *inv)
955
342k
{
956
342k
  mp_limb_t d, di;
957
342k
  mp_limb_t r;
958
342k
  mp_ptr tp = NULL;
959
960
342k
  if (inv->shift > 0)
961
326k
    {
962
      /* Shift, reusing qp area if possible. In-place shift if qp == np. */
963
326k
      tp = qp ? qp : gmp_xalloc_limbs (nn);
964
326k
      r = mpn_lshift (tp, np, nn, inv->shift);
965
326k
      np = tp;
966
326k
    }
967
16.1k
  else
968
16.1k
    r = 0;
969
970
342k
  d = inv->d1;
971
342k
  di = inv->di;
972
1.49M
  while (--nn >= 0)
973
1.15M
    {
974
1.15M
      mp_limb_t q;
975
976
1.15M
      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
977
1.15M
      if (qp)
978
1.15M
  qp[nn] = q;
979
1.15M
    }
980
342k
  if ((inv->shift > 0) && (tp != qp))
981
0
    gmp_free (tp);
982
983
342k
  return r >> inv->shift;
984
342k
}
985
986
static void
987
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
988
         const struct gmp_div_inverse *inv)
989
0
{
990
0
  unsigned shift;
991
0
  mp_size_t i;
992
0
  mp_limb_t d1, d0, di, r1, r0;
993
994
0
  assert (nn >= 2);
995
0
  shift = inv->shift;
996
0
  d1 = inv->d1;
997
0
  d0 = inv->d0;
998
0
  di = inv->di;
999
1000
0
  if (shift > 0)
1001
0
    r1 = mpn_lshift (np, np, nn, shift);
1002
0
  else
1003
0
    r1 = 0;
1004
1005
0
  r0 = np[nn - 1];
1006
1007
0
  i = nn - 2;
1008
0
  do
1009
0
    {
1010
0
      mp_limb_t n0, q;
1011
0
      n0 = np[i];
1012
0
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);
1013
1014
0
      if (qp)
1015
0
  qp[i] = q;
1016
0
    }
1017
0
  while (--i >= 0);
1018
1019
0
  if (shift > 0)
1020
0
    {
1021
0
      assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0);
1022
0
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
1023
0
      r1 >>= shift;
1024
0
    }
1025
1026
0
  np[1] = r1;
1027
0
  np[0] = r0;
1028
0
}
1029
1030
static void
1031
mpn_div_qr_pi1 (mp_ptr qp,
1032
    mp_ptr np, mp_size_t nn, mp_limb_t n1,
1033
    mp_srcptr dp, mp_size_t dn,
1034
    mp_limb_t dinv)
1035
1.40k
{
1036
1.40k
  mp_size_t i;
1037
1038
1.40k
  mp_limb_t d1, d0;
1039
1.40k
  mp_limb_t cy, cy1;
1040
1.40k
  mp_limb_t q;
1041
1042
1.40k
  assert (dn > 2);
1043
1.40k
  assert (nn >= dn);
1044
1045
1.40k
  d1 = dp[dn - 1];
1046
1.40k
  d0 = dp[dn - 2];
1047
1048
1.40k
  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
1049
  /* Iteration variable is the index of the q limb.
1050
   *
1051
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
1052
   * by            <d1,          d0,        dp[dn-3],  ..., dp[0] >
1053
   */
1054
1055
1.40k
  i = nn - dn;
1056
1.40k
  do
1057
11.0k
    {
1058
11.0k
      mp_limb_t n0 = np[dn-1+i];
1059
1060
11.0k
      if (n1 == d1 && n0 == d0)
1061
87
  {
1062
87
    q = GMP_LIMB_MAX;
1063
87
    mpn_submul_1 (np+i, dp, dn, q);
1064
87
    n1 = np[dn-1+i];  /* update n1, last loop's value will now be invalid */
1065
87
  }
1066
10.9k
      else
1067
10.9k
  {
1068
10.9k
    gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);
1069
1070
10.9k
    cy = mpn_submul_1 (np + i, dp, dn-2, q);
1071
1072
10.9k
    cy1 = n0 < cy;
1073
10.9k
    n0 = n0 - cy;
1074
10.9k
    cy = n1 < cy1;
1075
10.9k
    n1 = n1 - cy1;
1076
10.9k
    np[dn-2+i] = n0;
1077
1078
10.9k
    if (cy != 0)
1079
83
      {
1080
83
        n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
1081
83
        q--;
1082
83
      }
1083
10.9k
  }
1084
1085
11.0k
      if (qp)
1086
0
  qp[i] = q;
1087
11.0k
    }
1088
11.0k
  while (--i >= 0);
1089
1090
1.40k
  np[dn - 1] = n1;
1091
1.40k
}
1092
1093
static void
1094
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
1095
       mp_srcptr dp, mp_size_t dn,
1096
       const struct gmp_div_inverse *inv)
1097
1.40k
{
1098
1.40k
  assert (dn > 0);
1099
1.40k
  assert (nn >= dn);
1100
1101
1.40k
  if (dn == 1)
1102
0
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
1103
1.40k
  else if (dn == 2)
1104
0
    mpn_div_qr_2_preinv (qp, np, nn, inv);
1105
1.40k
  else
1106
1.40k
    {
1107
1.40k
      mp_limb_t nh;
1108
1.40k
      unsigned shift;
1109
1110
1.40k
      assert (inv->d1 == dp[dn-1]);
1111
1.40k
      assert (inv->d0 == dp[dn-2]);
1112
1.40k
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);
1113
1114
1.40k
      shift = inv->shift;
1115
1.40k
      if (shift > 0)
1116
424
  nh = mpn_lshift (np, np, nn, shift);
1117
981
      else
1118
981
  nh = 0;
1119
1120
1.40k
      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);
1121
1122
1.40k
      if (shift > 0)
1123
424
  gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
1124
1.40k
    }
1125
1.40k
}
1126
1127
static void
1128
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
1129
1.40k
{
1130
1.40k
  struct gmp_div_inverse inv;
1131
1.40k
  mp_ptr tp = NULL;
1132
1133
1.40k
  assert (dn > 0);
1134
1.40k
  assert (nn >= dn);
1135
1136
1.40k
  mpn_div_qr_invert (&inv, dp, dn);
1137
1.40k
  if (dn > 2 && inv.shift > 0)
1138
424
    {
1139
424
      tp = gmp_xalloc_limbs (dn);
1140
424
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
1141
424
      dp = tp;
1142
424
    }
1143
1.40k
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
1144
1.40k
  if (tp)
1145
424
    gmp_free (tp);
1146
1.40k
}
1147
1148

1149
/* MPN base conversion. */
1150
static unsigned
1151
mpn_base_power_of_two_p (unsigned b)
1152
8.54k
{
1153
8.54k
  switch (b)
1154
8.54k
    {
1155
0
    case 2: return 1;
1156
0
    case 4: return 2;
1157
286
    case 8: return 3;
1158
0
    case 16: return 4;
1159
0
    case 32: return 5;
1160
0
    case 64: return 6;
1161
0
    case 128: return 7;
1162
0
    case 256: return 8;
1163
8.25k
    default: return 0;
1164
8.54k
    }
1165
8.54k
}
1166
1167
struct mpn_base_info
1168
{
1169
  /* bb is the largest power of the base which fits in one limb, and
1170
     exp is the corresponding exponent. */
1171
  unsigned exp;
1172
  mp_limb_t bb;
1173
};
1174
1175
static void
1176
mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
1177
8.25k
{
1178
8.25k
  mp_limb_t m;
1179
8.25k
  mp_limb_t p;
1180
8.25k
  unsigned exp;
1181
1182
8.25k
  m = GMP_LIMB_MAX / b;
1183
156k
  for (exp = 1, p = b; p <= m; exp++)
1184
148k
    p *= b;
1185
1186
8.25k
  info->exp = exp;
1187
8.25k
  info->bb = p;
1188
8.25k
}
1189
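A standalone restatement of the loop above (illustration only, not library code): for base 10 and 64-bit limbs the largest base power fitting in a limb is 10^19, so exp ends up as 19 and bb as 10^19.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t b = 10;
  uint64_t m = UINT64_MAX / b;       /* while p <= m, p * b cannot overflow */
  uint64_t p;
  unsigned exp;

  for (exp = 1, p = b; p <= m; exp++)
    p *= b;                          /* same loop shape as mpn_get_base_info */

  assert (exp == 19);
  assert (p == 10000000000000000000ull);   /* 10^19 */
  return 0;
}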
1190
static mp_bitcnt_t
1191
mpn_limb_size_in_base_2 (mp_limb_t u)
1192
3.52k
{
1193
3.52k
  unsigned shift;
1194
1195
3.52k
  assert (u > 0);
1196
3.52k
  gmp_clz (shift, u);
1197
3.52k
  return GMP_LIMB_BITS - shift;
1198
3.52k
}
1199
1200
static size_t
1201
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
1202
0
{
1203
0
  unsigned char mask;
1204
0
  size_t sn, j;
1205
0
  mp_size_t i;
1206
0
  unsigned shift;
1207
1208
0
  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
1209
0
  + bits - 1) / bits;
1210
1211
0
  mask = (1U << bits) - 1;
1212
1213
0
  for (i = 0, j = sn, shift = 0; j-- > 0;)
1214
0
    {
1215
0
      unsigned char digit = up[i] >> shift;
1216
1217
0
      shift += bits;
1218
1219
0
      if (shift >= GMP_LIMB_BITS && ++i < un)
1220
0
  {
1221
0
    shift -= GMP_LIMB_BITS;
1222
0
    digit |= up[i] << (bits - shift);
1223
0
  }
1224
0
      sp[j] = digit & mask;
1225
0
    }
1226
0
  return sn;
1227
0
}
1228
1229
/* We generate digits from the least significant end, and reverse at
1230
   the end. */
1231
static size_t
1232
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
1233
      const struct gmp_div_inverse *binv)
1234
19.7k
{
1235
19.7k
  mp_size_t i;
1236
344k
  for (i = 0; w > 0; i++)
1237
324k
    {
1238
324k
      mp_limb_t h, l, r;
1239
1240
324k
      h = w >> (GMP_LIMB_BITS - binv->shift);
1241
324k
      l = w << binv->shift;
1242
1243
324k
      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
1244
324k
      assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0);
1245
324k
      r >>= binv->shift;
1246
1247
324k
      sp[i] = r;
1248
324k
    }
1249
19.7k
  return i;
1250
19.7k
}
1251
1252
static size_t
1253
mpn_get_str_other (unsigned char *sp,
1254
       int base, const struct mpn_base_info *info,
1255
       mp_ptr up, mp_size_t un)
1256
3.52k
{
1257
3.52k
  struct gmp_div_inverse binv;
1258
3.52k
  size_t sn;
1259
3.52k
  size_t i;
1260
1261
3.52k
  mpn_div_qr_1_invert (&binv, base);
1262
1263
3.52k
  sn = 0;
1264
1265
3.52k
  if (un > 1)
1266
3.46k
    {
1267
3.46k
      struct gmp_div_inverse bbinv;
1268
3.46k
      mpn_div_qr_1_invert (&bbinv, info->bb);
1269
1270
3.46k
      do
1271
16.1k
  {
1272
16.1k
    mp_limb_t w;
1273
16.1k
    size_t done;
1274
16.1k
    w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
1275
16.1k
    un -= (up[un-1] == 0);
1276
16.1k
    done = mpn_limb_get_str (sp + sn, w, &binv);
1277
1278
18.1k
    for (sn += done; done < info->exp; done++)
1279
1.99k
      sp[sn++] = 0;
1280
16.1k
  }
1281
16.1k
      while (un > 1);
1282
3.46k
    }
1283
3.52k
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);
1284
1285
  /* Reverse order */
1286
165k
  for (i = 0; 2*i + 1 < sn; i++)
1287
162k
    {
1288
162k
      unsigned char t = sp[i];
1289
162k
      sp[i] = sp[sn - i - 1];
1290
162k
      sp[sn - i - 1] = t;
1291
162k
    }
1292
1293
3.52k
  return sn;
1294
3.52k
}
1295
1296
size_t
1297
mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
1298
0
{
1299
0
  unsigned bits;
1300
1301
0
  assert (un > 0);
1302
0
  assert (up[un-1] > 0);
1303
1304
0
  bits = mpn_base_power_of_two_p (base);
1305
0
  if (bits)
1306
0
    return mpn_get_str_bits (sp, bits, up, un);
1307
0
  else
1308
0
    {
1309
0
      struct mpn_base_info info;
1310
1311
0
      mpn_get_base_info (&info, base);
1312
0
      return mpn_get_str_other (sp, base, &info, up, un);
1313
0
    }
1314
0
}
1315
1316
static mp_size_t
1317
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
1318
      unsigned bits)
1319
286
{
1320
286
  mp_size_t rn;
1321
286
  size_t j;
1322
286
  unsigned shift;
1323
1324
572
  for (j = sn, rn = 0, shift = 0; j-- > 0; )
1325
286
    {
1326
286
      if (shift == 0)
1327
286
  {
1328
286
    rp[rn++] = sp[j];
1329
286
    shift += bits;
1330
286
  }
1331
0
      else
1332
0
  {
1333
0
    rp[rn-1] |= (mp_limb_t) sp[j] << shift;
1334
0
    shift += bits;
1335
0
    if (shift >= GMP_LIMB_BITS)
1336
0
      {
1337
0
        shift -= GMP_LIMB_BITS;
1338
0
        if (shift > 0)
1339
0
    rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
1340
0
      }
1341
0
  }
1342
286
    }
1343
286
  rn = mpn_normalized_size (rp, rn);
1344
286
  return rn;
1345
286
}
1346
1347
/* Result is usually normalized, except for all-zero input, in which
1348
   case a single zero limb is written at *RP, and 1 is returned. */
1349
static mp_size_t
1350
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
1351
       mp_limb_t b, const struct mpn_base_info *info)
1352
4.73k
{
1353
4.73k
  mp_size_t rn;
1354
4.73k
  mp_limb_t w;
1355
4.73k
  unsigned k;
1356
4.73k
  size_t j;
1357
1358
4.73k
  assert (sn > 0);
1359
1360
4.73k
  k = 1 + (sn - 1) % info->exp;
1361
1362
4.73k
  j = 0;
1363
4.73k
  w = sp[j++];
1364
26.6k
  while (--k != 0)
1365
21.9k
    w = w * b + sp[j++];
1366
1367
4.73k
  rp[0] = w;
1368
1369
24.9k
  for (rn = 1; j < sn;)
1370
20.1k
    {
1371
20.1k
      mp_limb_t cy;
1372
1373
20.1k
      w = sp[j++];
1374
383k
      for (k = 1; k < info->exp; k++)
1375
363k
  w = w * b + sp[j++];
1376
1377
20.1k
      cy = mpn_mul_1 (rp, rp, rn, info->bb);
1378
20.1k
      cy += mpn_add_1 (rp, rp, rn, w);
1379
20.1k
      if (cy > 0)
1380
18.1k
  rp[rn++] = cy;
1381
20.1k
    }
1382
4.73k
  assert (j == sn);
1383
1384
4.73k
  return rn;
1385
4.73k
}
1386
1387
mp_size_t
1388
mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
1389
0
{
1390
0
  unsigned bits;
1391
1392
0
  if (sn == 0)
1393
0
    return 0;
1394
1395
0
  bits = mpn_base_power_of_two_p (base);
1396
0
  if (bits)
1397
0
    return mpn_set_str_bits (rp, sp, sn, bits);
1398
0
  else
1399
0
    {
1400
0
      struct mpn_base_info info;
1401
1402
0
      mpn_get_base_info (&info, base);
1403
0
      return mpn_set_str_other (rp, sp, sn, base, &info);
1404
0
    }
1405
0
}
1406
1407

1408
/* MPZ interface */
1409
void
1410
mpz_init (mpz_t r)
1411
19.6k
{
1412
19.6k
  static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0;
1413
1414
19.6k
  r->_mp_alloc = 0;
1415
19.6k
  r->_mp_size = 0;
1416
19.6k
  r->_mp_d = (mp_ptr) &dummy_limb;
1417
19.6k
}
1418
1419
/* The utility of this function is a bit limited, since many functions
1420
   assign the result variable using mpz_swap. */
1421
void
1422
mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1423
3.89k
{
1424
3.89k
  mp_size_t rn;
1425
1426
3.89k
  bits -= (bits != 0);    /* Round down, except if 0 */
1427
3.89k
  rn = 1 + bits / GMP_LIMB_BITS;
1428
1429
3.89k
  r->_mp_alloc = rn;
1430
3.89k
  r->_mp_size = 0;
1431
3.89k
  r->_mp_d = gmp_xalloc_limbs (rn);
1432
3.89k
}
1433
1434
void
1435
mpz_clear (mpz_t r)
1436
20.4k
{
1437
20.4k
  if (r->_mp_alloc)
1438
16.7k
    gmp_free (r->_mp_d);
1439
20.4k
}
1440
1441
static mp_ptr
1442
mpz_realloc (mpz_t r, mp_size_t size)
1443
13.9k
{
1444
13.9k
  size = GMP_MAX (size, 1);
1445
1446
13.9k
  if (r->_mp_alloc)
1447
1.11k
    r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
1448
12.8k
  else
1449
12.8k
    r->_mp_d = gmp_xalloc_limbs (size);
1450
13.9k
  r->_mp_alloc = size;
1451
1452
13.9k
  if (GMP_ABS (r->_mp_size) > size)
1453
0
    r->_mp_size = 0;
1454
1455
13.9k
  return r->_mp_d;
1456
13.9k
}
1457
1458
/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs.  */
1459
15.5k
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc      \
1460
15.5k
        ? mpz_realloc(z,n)      \
1461
15.5k
        : (z)->_mp_d)
1462

1463
/* MPZ assignment and basic conversions. */
1464
void
1465
mpz_set_si (mpz_t r, signed long int x)
1466
0
{
1467
0
  if (x >= 0)
1468
0
    mpz_set_ui (r, x);
1469
0
  else /* (x < 0) */
1470
0
    if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1471
0
      {
1472
0
  mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x));
1473
0
  mpz_neg (r, r);
1474
0
      }
1475
0
  else
1476
0
    {
1477
0
      r->_mp_size = -1;
1478
0
      MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x);
1479
0
    }
1480
0
}
1481
1482
void
1483
mpz_set_ui (mpz_t r, unsigned long int x)
1484
1.40k
{
1485
1.40k
  if (x > 0)
1486
1.40k
    {
1487
1.40k
      r->_mp_size = 1;
1488
1.40k
      MPZ_REALLOC (r, 1)[0] = x;
1489
1.40k
      if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1490
0
  {
1491
0
    int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
1492
0
    while (x >>= LOCAL_GMP_LIMB_BITS)
1493
0
      {
1494
0
        ++ r->_mp_size;
1495
0
        MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x;
1496
0
      }
1497
0
  }
1498
1.40k
    }
1499
0
  else
1500
0
    r->_mp_size = 0;
1501
1.40k
}
1502
1503
void
1504
mpz_set (mpz_t r, const mpz_t x)
1505
4.21k
{
1506
  /* Allow the NOP r == x */
1507
4.21k
  if (r != x)
1508
1.40k
    {
1509
1.40k
      mp_size_t n;
1510
1.40k
      mp_ptr rp;
1511
1512
1.40k
      n = GMP_ABS (x->_mp_size);
1513
1.40k
      rp = MPZ_REALLOC (r, n);
1514
1515
1.40k
      mpn_copyi (rp, x->_mp_d, n);
1516
1.40k
      r->_mp_size = x->_mp_size;
1517
1.40k
    }
1518
4.21k
}
1519
1520
void
1521
mpz_init_set_si (mpz_t r, signed long int x)
1522
0
{
1523
0
  mpz_init (r);
1524
0
  mpz_set_si (r, x);
1525
0
}
1526
1527
void
1528
mpz_init_set_ui (mpz_t r, unsigned long int x)
1529
1.40k
{
1530
1.40k
  mpz_init (r);
1531
1.40k
  mpz_set_ui (r, x);
1532
1.40k
}
1533
1534
void
1535
mpz_init_set (mpz_t r, const mpz_t x)
1536
1.40k
{
1537
1.40k
  mpz_init (r);
1538
1.40k
  mpz_set (r, x);
1539
1.40k
}
1540
1541
int
1542
mpz_fits_slong_p (const mpz_t u)
1543
0
{
1544
0
  return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0;
1545
0
}
1546
1547
static int
1548
mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un)
1549
0
{
1550
0
  int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS;
1551
0
  mp_limb_t ulongrem = 0;
1552
1553
0
  if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0)
1554
0
    ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1;
1555
1556
0
  return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1);
1557
0
}
1558
1559
int
1560
mpz_fits_ulong_p (const mpz_t u)
1561
0
{
1562
0
  mp_size_t us = u->_mp_size;
1563
1564
0
  return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us);
1565
0
}
1566
1567
int
1568
mpz_fits_sint_p (const mpz_t u)
1569
0
{
1570
0
  return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0;
1571
0
}
1572
1573
int
1574
mpz_fits_uint_p (const mpz_t u)
1575
0
{
1576
0
  return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0;
1577
0
}
1578
1579
int
1580
mpz_fits_sshort_p (const mpz_t u)
1581
0
{
1582
0
  return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0;
1583
0
}
1584
1585
int
1586
mpz_fits_ushort_p (const mpz_t u)
1587
0
{
1588
0
  return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0;
1589
0
}
1590
1591
long int
1592
mpz_get_si (const mpz_t u)
1593
0
{
1594
0
  unsigned long r = mpz_get_ui (u);
1595
0
  unsigned long c = -LONG_MAX - LONG_MIN;
1596
1597
0
  if (u->_mp_size < 0)
1598
    /* This expression is necessary to properly handle -LONG_MIN */
1599
0
    return -(long) c - (long) ((r - c) & LONG_MAX);
1600
0
  else
1601
0
    return (long) (r & LONG_MAX);
1602
0
}
1603
1604
unsigned long int
1605
mpz_get_ui (const mpz_t u)
1606
0
{
1607
0
  if (GMP_LIMB_BITS < GMP_ULONG_BITS)
1608
0
    {
1609
0
      int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
1610
0
      unsigned long r = 0;
1611
0
      mp_size_t n = GMP_ABS (u->_mp_size);
1612
0
      n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS);
1613
0
      while (--n >= 0)
1614
0
  r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n];
1615
0
      return r;
1616
0
    }
1617
1618
0
  return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1619
0
}
1620
1621
size_t
1622
mpz_size (const mpz_t u)
1623
4.51k
{
1624
4.51k
  return GMP_ABS (u->_mp_size);
1625
4.51k
}
1626
1627
mp_limb_t
1628
mpz_getlimbn (const mpz_t u, mp_size_t n)
1629
0
{
1630
0
  if (n >= 0 && n < GMP_ABS (u->_mp_size))
1631
0
    return u->_mp_d[n];
1632
0
  else
1633
0
    return 0;
1634
0
}
1635
1636
void
1637
mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
1638
0
{
1639
0
  mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
1640
0
}
1641
1642
mp_srcptr
1643
mpz_limbs_read (mpz_srcptr x)
1644
3.30k
{
1645
3.30k
  return x->_mp_d;
1646
3.30k
}
1647
1648
mp_ptr
1649
mpz_limbs_modify (mpz_t x, mp_size_t n)
1650
3.52k
{
1651
3.52k
  assert (n > 0);
1652
3.52k
  return MPZ_REALLOC (x, n);
1653
3.52k
}
1654
1655
mp_ptr
1656
mpz_limbs_write (mpz_t x, mp_size_t n)
1657
3.52k
{
1658
3.52k
  return mpz_limbs_modify (x, n);
1659
3.52k
}
1660
1661
void
1662
mpz_limbs_finish (mpz_t x, mp_size_t xs)
1663
8.62k
{
1664
8.62k
  mp_size_t xn;
1665
8.62k
  xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1666
8.62k
  x->_mp_size = xs < 0 ? -xn : xn;
1667
8.62k
}
1668
1669
static mpz_srcptr
1670
mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1671
5.10k
{
1672
5.10k
  x->_mp_alloc = 0;
1673
5.10k
  x->_mp_d = (mp_ptr) xp;
1674
5.10k
  x->_mp_size = xs;
1675
5.10k
  return x;
1676
5.10k
}
1677
1678
mpz_srcptr
1679
mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1680
5.10k
{
1681
5.10k
  mpz_roinit_normal_n (x, xp, xs);
1682
5.10k
  mpz_limbs_finish (x, xs);
1683
5.10k
  return x;
1684
5.10k
}
1685
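A small usage sketch for the read-only initializer above (assumes 64-bit limbs; the wrapped value borrows the caller's limb array, so nothing is allocated and mpz_clear is not needed for it):

#include <assert.h>
#include "mini-gmp.h"

int
main (void)
{
  mp_limb_t limbs[2] = { 5, 1 };            /* 1 * 2^64 + 5 on 64-bit limbs */
  mpz_t z;
  mpz_srcptr zp = mpz_roinit_n (z, limbs, 2);

  assert (mpz_size (zp) == 2);
  assert (mpz_getlimbn (zp, 1) == 1);
  assert (mpz_getlimbn (zp, 0) == 5);
  return 0;
}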
1686

1687
/* Conversions and comparison to double. */
1688
void
1689
mpz_set_d (mpz_t r, double x)
1690
0
{
1691
0
  int sign;
1692
0
  mp_ptr rp;
1693
0
  mp_size_t rn, i;
1694
0
  double B;
1695
0
  double Bi;
1696
0
  mp_limb_t f;
1697
1698
  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
1699
     zero or infinity. */
1700
0
  if (x != x || x == x * 0.5)
1701
0
    {
1702
0
      r->_mp_size = 0;
1703
0
      return;
1704
0
    }
1705
1706
0
  sign = x < 0.0 ;
1707
0
  if (sign)
1708
0
    x = - x;
1709
1710
0
  if (x < 1.0)
1711
0
    {
1712
0
      r->_mp_size = 0;
1713
0
      return;
1714
0
    }
1715
0
  B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1716
0
  Bi = 1.0 / B;
1717
0
  for (rn = 1; x >= B; rn++)
1718
0
    x *= Bi;
1719
1720
0
  rp = MPZ_REALLOC (r, rn);
1721
1722
0
  f = (mp_limb_t) x;
1723
0
  x -= f;
1724
0
  assert (x < 1.0);
1725
0
  i = rn-1;
1726
0
  rp[i] = f;
1727
0
  while (--i >= 0)
1728
0
    {
1729
0
      x = B * x;
1730
0
      f = (mp_limb_t) x;
1731
0
      x -= f;
1732
0
      assert (x < 1.0);
1733
0
      rp[i] = f;
1734
0
    }
1735
1736
0
  r->_mp_size = sign ? - rn : rn;
1737
0
}
1738
1739
void
1740
mpz_init_set_d (mpz_t r, double x)
1741
0
{
1742
0
  mpz_init (r);
1743
0
  mpz_set_d (r, x);
1744
0
}
1745
1746
double
1747
mpz_get_d (const mpz_t u)
1748
0
{
1749
0
  int m;
1750
0
  mp_limb_t l;
1751
0
  mp_size_t un;
1752
0
  double x;
1753
0
  double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1754
1755
0
  un = GMP_ABS (u->_mp_size);
1756
1757
0
  if (un == 0)
1758
0
    return 0.0;
1759
1760
0
  l = u->_mp_d[--un];
1761
0
  gmp_clz (m, l);
1762
0
  m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS;
1763
0
  if (m < 0)
1764
0
    l &= GMP_LIMB_MAX << -m;
1765
1766
0
  for (x = l; --un >= 0;)
1767
0
    {
1768
0
      x = B*x;
1769
0
      if (m > 0) {
1770
0
  l = u->_mp_d[un];
1771
0
  m -= GMP_LIMB_BITS;
1772
0
  if (m < 0)
1773
0
    l &= GMP_LIMB_MAX << -m;
1774
0
  x += l;
1775
0
      }
1776
0
    }
1777
1778
0
  if (u->_mp_size < 0)
1779
0
    x = -x;
1780
1781
0
  return x;
1782
0
}
1783
1784
int
1785
mpz_cmpabs_d (const mpz_t x, double d)
1786
0
{
1787
0
  mp_size_t xn;
1788
0
  double B, Bi;
1789
0
  mp_size_t i;
1790
1791
0
  xn = x->_mp_size;
1792
0
  d = GMP_ABS (d);
1793
1794
0
  if (xn != 0)
1795
0
    {
1796
0
      xn = GMP_ABS (xn);
1797
1798
0
      B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
1799
0
      Bi = 1.0 / B;
1800
1801
      /* Scale d so it can be compared with the top limb. */
1802
0
      for (i = 1; i < xn; i++)
1803
0
  d *= Bi;
1804
1805
0
      if (d >= B)
1806
0
  return -1;
1807
1808
      /* Compare floor(d) to top limb, subtract and cancel when equal. */
1809
0
      for (i = xn; i-- > 0;)
1810
0
  {
1811
0
    mp_limb_t f, xl;
1812
1813
0
    f = (mp_limb_t) d;
1814
0
    xl = x->_mp_d[i];
1815
0
    if (xl > f)
1816
0
      return 1;
1817
0
    else if (xl < f)
1818
0
      return -1;
1819
0
    d = B * (d - f);
1820
0
  }
1821
0
    }
1822
0
  return - (d > 0.0);
1823
0
}
1824
1825
int
1826
mpz_cmp_d (const mpz_t x, double d)
1827
0
{
1828
0
  if (x->_mp_size < 0)
1829
0
    {
1830
0
      if (d >= 0.0)
1831
0
  return -1;
1832
0
      else
1833
0
  return -mpz_cmpabs_d (x, d);
1834
0
    }
1835
0
  else
1836
0
    {
1837
0
      if (d < 0.0)
1838
0
  return 1;
1839
0
      else
1840
0
  return mpz_cmpabs_d (x, d);
1841
0
    }
1842
0
}
1843
1844

1845
/* MPZ comparisons and the like. */
1846
int
1847
mpz_sgn (const mpz_t u)
1848
7.39k
{
1849
7.39k
  return GMP_CMP (u->_mp_size, 0);
1850
7.39k
}
1851
1852
int
1853
mpz_cmp_si (const mpz_t u, long v)
1854
0
{
1855
0
  mp_size_t usize = u->_mp_size;
1856
1857
0
  if (v >= 0)
1858
0
    return mpz_cmp_ui (u, v);
1859
0
  else if (usize >= 0)
1860
0
    return 1;
1861
0
  else
1862
0
    return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v));
1863
0
}
1864
1865
int
1866
mpz_cmp_ui (const mpz_t u, unsigned long v)
1867
0
{
1868
0
  mp_size_t usize = u->_mp_size;
1869
1870
0
  if (usize < 0)
1871
0
    return -1;
1872
0
  else
1873
0
    return mpz_cmpabs_ui (u, v);
1874
0
}
1875
1876
int
1877
mpz_cmp (const mpz_t a, const mpz_t b)
1878
3.72k
{
1879
3.72k
  mp_size_t asize = a->_mp_size;
1880
3.72k
  mp_size_t bsize = b->_mp_size;
1881
1882
3.72k
  if (asize != bsize)
1883
1.81k
    return (asize < bsize) ? -1 : 1;
1884
1.90k
  else if (asize >= 0)
1885
1.90k
    return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1886
0
  else
1887
0
    return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
1888
3.72k
}
1889
1890
int
1891
mpz_cmpabs_ui (const mpz_t u, unsigned long v)
1892
0
{
1893
0
  mp_size_t un = GMP_ABS (u->_mp_size);
1894
1895
0
  if (! mpn_absfits_ulong_p (u->_mp_d, un))
1896
0
    return 1;
1897
0
  else
1898
0
    {
1899
0
      unsigned long uu = mpz_get_ui (u);
1900
0
      return GMP_CMP(uu, v);
1901
0
    }
1902
0
}
1903
1904
int
1905
mpz_cmpabs (const mpz_t u, const mpz_t v)
1906
0
{
1907
0
  return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
1908
0
       v->_mp_d, GMP_ABS (v->_mp_size));
1909
0
}
1910
1911
void
1912
mpz_abs (mpz_t r, const mpz_t u)
1913
0
{
1914
0
  mpz_set (r, u);
1915
0
  r->_mp_size = GMP_ABS (r->_mp_size);
1916
0
}
1917
1918
void
1919
mpz_neg (mpz_t r, const mpz_t u)
1920
2.81k
{
1921
2.81k
  mpz_set (r, u);
1922
2.81k
  r->_mp_size = -r->_mp_size;
1923
2.81k
}
1924
1925
void
1926
mpz_swap (mpz_t u, mpz_t v)
1927
3.89k
{
1928
3.89k
  MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
1929
3.89k
  MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
1930
3.89k
  MP_PTR_SWAP (u->_mp_d, v->_mp_d);
1931
3.89k
}
1932
1933

1934
/* MPZ addition and subtraction */
1935
1936
1937
void
1938
mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1939
1.40k
{
1940
1.40k
  mpz_t bb;
1941
1.40k
  mpz_init_set_ui (bb, b);
1942
1.40k
  mpz_add (r, a, bb);
1943
1.40k
  mpz_clear (bb);
1944
1.40k
}
1945
1946
void
1947
mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1948
1.40k
{
1949
1.40k
  mpz_ui_sub (r, b, a);
1950
1.40k
  mpz_neg (r, r);
1951
1.40k
}
1952
1953
void
1954
mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
1955
1.40k
{
1956
1.40k
  mpz_neg (r, b);
1957
1.40k
  mpz_add_ui (r, r, a);
1958
1.40k
}
1959
1960
static mp_size_t
1961
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
1962
1.44k
{
1963
1.44k
  mp_size_t an = GMP_ABS (a->_mp_size);
1964
1.44k
  mp_size_t bn = GMP_ABS (b->_mp_size);
1965
1.44k
  mp_ptr rp;
1966
1.44k
  mp_limb_t cy;
1967
1968
1.44k
  if (an < bn)
1969
234
    {
1970
234
      MPZ_SRCPTR_SWAP (a, b);
1971
234
      MP_SIZE_T_SWAP (an, bn);
1972
234
    }
1973
1974
1.44k
  rp = MPZ_REALLOC (r, an + 1);
1975
1.44k
  cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
1976
1977
1.44k
  rp[an] = cy;
1978
1979
1.44k
  return an + cy;
1980
1.44k
}
1981
1982
static mp_size_t
1983
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
1984
2.76k
{
1985
2.76k
  mp_size_t an = GMP_ABS (a->_mp_size);
1986
2.76k
  mp_size_t bn = GMP_ABS (b->_mp_size);
1987
2.76k
  int cmp;
1988
2.76k
  mp_ptr rp;
1989
1990
2.76k
  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
1991
2.76k
  if (cmp > 0)
1992
1.61k
    {
1993
1.61k
      rp = MPZ_REALLOC (r, an);
1994
1.61k
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
1995
1.61k
      return mpn_normalized_size (rp, an);
1996
1.61k
    }
1997
1.15k
  else if (cmp < 0)
1998
1.15k
    {
1999
1.15k
      rp = MPZ_REALLOC (r, bn);
2000
1.15k
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
2001
1.15k
      return -mpn_normalized_size (rp, bn);
2002
1.15k
    }
2003
0
  else
2004
0
    return 0;
2005
2.76k
}
2006
2007
void
2008
mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
2009
2.81k
{
2010
2.81k
  mp_size_t rn;
2011
2012
2.81k
  if ( (a->_mp_size ^ b->_mp_size) >= 0)
2013
1.44k
    rn = mpz_abs_add (r, a, b);
2014
1.36k
  else
2015
1.36k
    rn = mpz_abs_sub (r, a, b);
2016
2017
2.81k
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
2018
2.81k
}
2019
2020
void
2021
mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
2022
1.40k
{
2023
1.40k
  mp_size_t rn;
2024
2025
1.40k
  if ( (a->_mp_size ^ b->_mp_size) >= 0)
2026
1.40k
    rn = mpz_abs_sub (r, a, b);
2027
0
  else
2028
0
    rn = mpz_abs_add (r, a, b);
2029
2030
1.40k
  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
2031
1.40k
}
2032
2033

2034
/* MPZ multiplication */
2035
void
2036
mpz_mul_si (mpz_t r, const mpz_t u, long int v)
2037
0
{
2038
0
  if (v < 0)
2039
0
    {
2040
0
      mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
2041
0
      mpz_neg (r, r);
2042
0
    }
2043
0
  else
2044
0
    mpz_mul_ui (r, u, v);
2045
0
}
2046
2047
void
2048
mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2049
0
{
2050
0
  mpz_t vv;
2051
0
  mpz_init_set_ui (vv, v);
2052
0
  mpz_mul (r, u, vv);
2053
0
  mpz_clear (vv);
2054
0
  return;
2055
0
}
2056
2057
void
2058
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
2059
4.21k
{
2060
4.21k
  int sign;
2061
4.21k
  mp_size_t un, vn, rn;
2062
4.21k
  mpz_t t;
2063
4.21k
  mp_ptr tp;
2064
2065
4.21k
  un = u->_mp_size;
2066
4.21k
  vn = v->_mp_size;
2067
2068
4.21k
  if (un == 0 || vn == 0)
2069
321
    {
2070
321
      r->_mp_size = 0;
2071
321
      return;
2072
321
    }
2073
2074
3.89k
  sign = (un ^ vn) < 0;
2075
2076
3.89k
  un = GMP_ABS (un);
2077
3.89k
  vn = GMP_ABS (vn);
2078
2079
3.89k
  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);
2080
2081
3.89k
  tp = t->_mp_d;
2082
3.89k
  if (un >= vn)
2083
3.89k
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
2084
0
  else
2085
0
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);
2086
2087
3.89k
  rn = un + vn;
2088
3.89k
  rn -= tp[rn-1] == 0;
2089
2090
3.89k
  t->_mp_size = sign ? - rn : rn;
2091
3.89k
  mpz_swap (r, t);
2092
3.89k
  mpz_clear (t);
2093
3.89k
}
2094
2095
void
2096
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
2097
0
{
2098
0
  mp_size_t un, rn;
2099
0
  mp_size_t limbs;
2100
0
  unsigned shift;
2101
0
  mp_ptr rp;
2102
2103
0
  un = GMP_ABS (u->_mp_size);
2104
0
  if (un == 0)
2105
0
    {
2106
0
      r->_mp_size = 0;
2107
0
      return;
2108
0
    }
2109
2110
0
  limbs = bits / GMP_LIMB_BITS;
2111
0
  shift = bits % GMP_LIMB_BITS;
2112
2113
0
  rn = un + limbs + (shift > 0);
2114
0
  rp = MPZ_REALLOC (r, rn);
2115
0
  if (shift > 0)
2116
0
    {
2117
0
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
2118
0
      rp[rn-1] = cy;
2119
0
      rn -= (cy == 0);
2120
0
    }
2121
0
  else
2122
0
    mpn_copyd (rp + limbs, u->_mp_d, un);
2123
2124
0
  mpn_zero (rp, limbs);
2125
2126
0
  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
2127
0
}
2128
2129
void
2130
mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2131
0
{
2132
0
  mpz_t t;
2133
0
  mpz_init_set_ui (t, v);
2134
0
  mpz_mul (t, u, t);
2135
0
  mpz_add (r, r, t);
2136
0
  mpz_clear (t);
2137
0
}
2138
2139
void
2140
mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2141
0
{
2142
0
  mpz_t t;
2143
0
  mpz_init_set_ui (t, v);
2144
0
  mpz_mul (t, u, t);
2145
0
  mpz_sub (r, r, t);
2146
0
  mpz_clear (t);
2147
0
}
2148
2149
void
2150
mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
2151
0
{
2152
0
  mpz_t t;
2153
0
  mpz_init (t);
2154
0
  mpz_mul (t, u, v);
2155
0
  mpz_add (r, r, t);
2156
0
  mpz_clear (t);
2157
0
}
2158
2159
void
2160
mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
2161
0
{
2162
0
  mpz_t t;
2163
0
  mpz_init (t);
2164
0
  mpz_mul (t, u, v);
2165
0
  mpz_sub (r, r, t);
2166
0
  mpz_clear (t);
2167
0
}
2168
2169

2170
/* MPZ division */
2171
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2172
2173
/* Allows q or r to be NULL. Returns 1 iff remainder is non-zero. */
2174
static int
2175
mpz_div_qr (mpz_t q, mpz_t r,
2176
      const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
2177
1.40k
{
2178
1.40k
  mp_size_t ns, ds, nn, dn, qs;
2179
1.40k
  ns = n->_mp_size;
2180
1.40k
  ds = d->_mp_size;
2181
2182
1.40k
  if (ds == 0)
2183
0
    gmp_die("mpz_div_qr: Divide by zero.");
2184
2185
1.40k
  if (ns == 0)
2186
0
    {
2187
0
      if (q)
2188
0
  q->_mp_size = 0;
2189
0
      if (r)
2190
0
  r->_mp_size = 0;
2191
0
      return 0;
2192
0
    }
2193
2194
1.40k
  nn = GMP_ABS (ns);
2195
1.40k
  dn = GMP_ABS (ds);
2196
2197
1.40k
  qs = ds ^ ns;
2198
2199
1.40k
  if (nn < dn)
2200
0
    {
2201
0
      if (mode == GMP_DIV_CEIL && qs >= 0)
2202
0
  {
2203
    /* q = 1, r = n - d */
2204
0
    if (r)
2205
0
      mpz_sub (r, n, d);
2206
0
    if (q)
2207
0
      mpz_set_ui (q, 1);
2208
0
  }
2209
0
      else if (mode == GMP_DIV_FLOOR && qs < 0)
2210
0
  {
2211
    /* q = -1, r = n + d */
2212
0
    if (r)
2213
0
      mpz_add (r, n, d);
2214
0
    if (q)
2215
0
      mpz_set_si (q, -1);
2216
0
  }
2217
0
      else
2218
0
  {
2219
    /* q = 0, r = n */
2220
0
    if (r)
2221
0
      mpz_set (r, n);
2222
0
    if (q)
2223
0
      q->_mp_size = 0;
2224
0
  }
2225
0
      return 1;
2226
0
    }
2227
1.40k
  else
2228
1.40k
    {
2229
1.40k
      mp_ptr np, qp;
2230
1.40k
      mp_size_t qn, rn;
2231
1.40k
      mpz_t tq, tr;
2232
2233
1.40k
      mpz_init_set (tr, n);
2234
1.40k
      np = tr->_mp_d;
2235
2236
1.40k
      qn = nn - dn + 1;
2237
2238
1.40k
      if (q)
2239
0
  {
2240
0
    mpz_init2 (tq, qn * GMP_LIMB_BITS);
2241
0
    qp = tq->_mp_d;
2242
0
  }
2243
1.40k
      else
2244
1.40k
  qp = NULL;
2245
2246
1.40k
      mpn_div_qr (qp, np, nn, d->_mp_d, dn);
2247
2248
1.40k
      if (qp)
2249
0
  {
2250
0
    qn -= (qp[qn-1] == 0);
2251
2252
0
    tq->_mp_size = qs < 0 ? -qn : qn;
2253
0
  }
2254
1.40k
      rn = mpn_normalized_size (np, dn);
2255
1.40k
      tr->_mp_size = ns < 0 ? - rn : rn;
2256
2257
1.40k
      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
2258
0
  {
2259
0
    if (q)
2260
0
      mpz_sub_ui (tq, tq, 1);
2261
0
    if (r)
2262
0
      mpz_add (tr, tr, d);
2263
0
  }
2264
1.40k
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
2265
0
  {
2266
0
    if (q)
2267
0
      mpz_add_ui (tq, tq, 1);
2268
0
    if (r)
2269
0
      mpz_sub (tr, tr, d);
2270
0
  }
2271
2272
1.40k
      if (q)
2273
0
  {
2274
0
    mpz_swap (tq, q);
2275
0
    mpz_clear (tq);
2276
0
  }
2277
1.40k
      if (r)
2278
0
  mpz_swap (tr, r);
2279
2280
1.40k
      mpz_clear (tr);
2281
2282
1.40k
      return rn != 0;
2283
1.40k
    }
2284
1.40k
}
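
The wrappers that follow only pick a rounding mode for mpz_div_qr. As a minimal
sketch of how the three modes differ for a negative dividend (an editorial
example, not part of mini-gmp; the example_* helper name is hypothetical and
only the public mini-gmp.h API is assumed):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_div_rounding (void)
  {
    mpz_t n, d, q, r;
    mpz_init_set_si (n, -7);
    mpz_init_set_si (d, 2);
    mpz_init (q);
    mpz_init (r);

    mpz_tdiv_qr (q, r, n, d);  /* truncate toward zero:   q = -3, r = -1 */
    assert (mpz_cmp_si (q, -3) == 0 && mpz_cmp_si (r, -1) == 0);

    mpz_fdiv_qr (q, r, n, d);  /* round toward -infinity: q = -4, r =  1 */
    assert (mpz_cmp_si (q, -4) == 0 && mpz_cmp_si (r, 1) == 0);

    mpz_cdiv_qr (q, r, n, d);  /* round toward +infinity: q = -3, r = -1 */
    assert (mpz_cmp_si (q, -3) == 0 && mpz_cmp_si (r, -1) == 0);

    /* In every mode n = q*d + r holds, with |r| < |d|. */
    mpz_clear (n); mpz_clear (d); mpz_clear (q); mpz_clear (r);
  }
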
2285
2286
void
2287
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2288
0
{
2289
0
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
2290
0
}
2291
2292
void
2293
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2294
0
{
2295
0
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
2296
0
}
2297
2298
void
2299
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
2300
0
{
2301
0
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
2302
0
}
2303
2304
void
2305
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2306
0
{
2307
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
2308
0
}
2309
2310
void
2311
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2312
0
{
2313
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
2314
0
}
2315
2316
void
2317
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
2318
0
{
2319
0
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
2320
0
}
2321
2322
void
2323
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2324
0
{
2325
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
2326
0
}
2327
2328
void
2329
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2330
0
{
2331
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
2332
0
}
2333
2334
void
2335
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
2336
0
{
2337
0
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
2338
0
}
2339
2340
void
2341
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
2342
0
{
2343
0
  mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
2344
0
}
2345
2346
static void
2347
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
2348
    enum mpz_div_round_mode mode)
2349
0
{
2350
0
  mp_size_t un, qn;
2351
0
  mp_size_t limb_cnt;
2352
0
  mp_ptr qp;
2353
0
  int adjust;
2354
2355
0
  un = u->_mp_size;
2356
0
  if (un == 0)
2357
0
    {
2358
0
      q->_mp_size = 0;
2359
0
      return;
2360
0
    }
2361
0
  limb_cnt = bit_index / GMP_LIMB_BITS;
2362
0
  qn = GMP_ABS (un) - limb_cnt;
2363
0
  bit_index %= GMP_LIMB_BITS;
2364
2365
0
  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
2366
    /* Note: Below, the final indexing at limb_cnt is valid because at
2367
       that point we have qn > 0. */
2368
0
    adjust = (qn <= 0
2369
0
        || !mpn_zero_p (u->_mp_d, limb_cnt)
2370
0
        || (u->_mp_d[limb_cnt]
2371
0
      & (((mp_limb_t) 1 << bit_index) - 1)));
2372
0
  else
2373
0
    adjust = 0;
2374
2375
0
  if (qn <= 0)
2376
0
    qn = 0;
2377
0
  else
2378
0
    {
2379
0
      qp = MPZ_REALLOC (q, qn);
2380
2381
0
      if (bit_index != 0)
2382
0
  {
2383
0
    mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
2384
0
    qn -= qp[qn - 1] == 0;
2385
0
  }
2386
0
      else
2387
0
  {
2388
0
    mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
2389
0
  }
2390
0
    }
2391
2392
0
  q->_mp_size = qn;
2393
2394
0
  if (adjust)
2395
0
    mpz_add_ui (q, q, 1);
2396
0
  if (un < 0)
2397
0
    mpz_neg (q, q);
2398
0
}
2399
2400
static void
2401
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
2402
    enum mpz_div_round_mode mode)
2403
0
{
2404
0
  mp_size_t us, un, rn;
2405
0
  mp_ptr rp;
2406
0
  mp_limb_t mask;
2407
2408
0
  us = u->_mp_size;
2409
0
  if (us == 0 || bit_index == 0)
2410
0
    {
2411
0
      r->_mp_size = 0;
2412
0
      return;
2413
0
    }
2414
0
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
2415
0
  assert (rn > 0);
2416
2417
0
  rp = MPZ_REALLOC (r, rn);
2418
0
  un = GMP_ABS (us);
2419
2420
0
  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);
2421
2422
0
  if (rn > un)
2423
0
    {
2424
      /* Quotient (with truncation) is zero, and remainder is
2425
   non-zero */
2426
0
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2427
0
  {
2428
    /* Have to negate and sign extend. */
2429
0
    mp_size_t i;
2430
2431
0
    gmp_assert_nocarry (! mpn_neg (rp, u->_mp_d, un));
2432
0
    for (i = un; i < rn - 1; i++)
2433
0
      rp[i] = GMP_LIMB_MAX;
2434
2435
0
    rp[rn-1] = mask;
2436
0
    us = -us;
2437
0
  }
2438
0
      else
2439
0
  {
2440
    /* Just copy */
2441
0
    if (r != u)
2442
0
      mpn_copyi (rp, u->_mp_d, un);
2443
2444
0
    rn = un;
2445
0
  }
2446
0
    }
2447
0
  else
2448
0
    {
2449
0
      if (r != u)
2450
0
  mpn_copyi (rp, u->_mp_d, rn - 1);
2451
2452
0
      rp[rn-1] = u->_mp_d[rn-1] & mask;
2453
2454
0
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
2455
0
  {
2456
    /* If r != 0, compute 2^{bit_count} - r. */
2457
0
    mpn_neg (rp, rp, rn);
2458
2459
0
    rp[rn-1] &= mask;
2460
2461
    /* us is not used for anything else, so we can modify it
2462
       here to indicate flipped sign. */
2463
0
    us = -us;
2464
0
  }
2465
0
    }
2466
0
  rn = mpn_normalized_size (rp, rn);
2467
0
  r->_mp_size = us < 0 ? -rn : rn;
2468
0
}
2469
2470
void
2471
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2472
0
{
2473
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
2474
0
}
2475
2476
void
2477
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2478
0
{
2479
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
2480
0
}
2481
2482
void
2483
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2484
0
{
2485
0
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
2486
0
}
2487
2488
void
2489
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2490
0
{
2491
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
2492
0
}
2493
2494
void
2495
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2496
0
{
2497
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
2498
0
}
2499
2500
void
2501
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
2502
0
{
2503
0
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
2504
0
}
2505
2506
void
2507
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
2508
0
{
2509
0
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
2510
0
}
2511
2512
int
2513
mpz_divisible_p (const mpz_t n, const mpz_t d)
2514
1.40k
{
2515
1.40k
  return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2516
1.40k
}
2517
2518
int
2519
mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
2520
1.40k
{
2521
1.40k
  mpz_t t;
2522
1.40k
  int res;
2523
2524
  /* a == b (mod 0) iff a == b */
2525
1.40k
  if (mpz_sgn (m) == 0)
2526
0
    return (mpz_cmp (a, b) == 0);
2527
2528
1.40k
  mpz_init (t);
2529
1.40k
  mpz_sub (t, a, b);
2530
1.40k
  res = mpz_divisible_p (t, m);
2531
1.40k
  mpz_clear (t);
2532
2533
1.40k
  return res;
2534
1.40k
}
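
A quick illustration of the reduction used above, a == b (mod m) iff m divides
a - b (hypothetical snippet, not from this file; only the public API is used):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_congruence (void)
  {
    mpz_t a, b, m;
    mpz_init_set_ui (a, 17);
    mpz_init_set_ui (b, 5);
    mpz_init_set_ui (m, 6);

    /* 17 - 5 = 12 is divisible by 6, so 17 == 5 (mod 6). */
    assert (mpz_congruent_p (a, b, m));

    mpz_clear (a); mpz_clear (b); mpz_clear (m);
  }
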
2535
2536
static unsigned long
2537
mpz_div_qr_ui (mpz_t q, mpz_t r,
2538
         const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
2539
0
{
2540
0
  unsigned long ret;
2541
0
  mpz_t rr, dd;
2542
2543
0
  mpz_init (rr);
2544
0
  mpz_init_set_ui (dd, d);
2545
0
  mpz_div_qr (q, rr, n, dd, mode);
2546
0
  mpz_clear (dd);
2547
0
  ret = mpz_get_ui (rr);
2548
2549
0
  if (r)
2550
0
    mpz_swap (r, rr);
2551
0
  mpz_clear (rr);
2552
2553
0
  return ret;
2554
0
}
2555
2556
unsigned long
2557
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2558
0
{
2559
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
2560
0
}
2561
2562
unsigned long
2563
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2564
0
{
2565
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
2566
0
}
2567
2568
unsigned long
2569
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
2570
0
{
2571
0
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
2572
0
}
2573
2574
unsigned long
2575
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2576
0
{
2577
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
2578
0
}
2579
2580
unsigned long
2581
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2582
0
{
2583
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
2584
0
}
2585
2586
unsigned long
2587
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
2588
0
{
2589
0
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
2590
0
}
2591
2592
unsigned long
2593
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2594
0
{
2595
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
2596
0
}
2597
unsigned long
2598
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2599
0
{
2600
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2601
0
}
2602
unsigned long
2603
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
2604
0
{
2605
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
2606
0
}
2607
2608
unsigned long
2609
mpz_cdiv_ui (const mpz_t n, unsigned long d)
2610
0
{
2611
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
2612
0
}
2613
2614
unsigned long
2615
mpz_fdiv_ui (const mpz_t n, unsigned long d)
2616
0
{
2617
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
2618
0
}
2619
2620
unsigned long
2621
mpz_tdiv_ui (const mpz_t n, unsigned long d)
2622
0
{
2623
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
2624
0
}
2625
2626
unsigned long
2627
mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
2628
0
{
2629
0
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
2630
0
}
2631
2632
void
2633
mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
2634
0
{
2635
0
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
2636
0
}
2637
2638
int
2639
mpz_divisible_ui_p (const mpz_t n, unsigned long d)
2640
0
{
2641
0
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
2642
0
}
2643
2644

2645
/* GCD */
2646
static mp_limb_t
2647
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
2648
0
{
2649
0
  unsigned shift;
2650
2651
0
  assert ( (u | v) > 0);
2652
2653
0
  if (u == 0)
2654
0
    return v;
2655
0
  else if (v == 0)
2656
0
    return u;
2657
2658
0
  gmp_ctz (shift, u | v);
2659
2660
0
  u >>= shift;
2661
0
  v >>= shift;
2662
2663
0
  if ( (u & 1) == 0)
2664
0
    MP_LIMB_T_SWAP (u, v);
2665
2666
0
  while ( (v & 1) == 0)
2667
0
    v >>= 1;
2668
2669
0
  while (u != v)
2670
0
    {
2671
0
      if (u > v)
2672
0
  {
2673
0
    u -= v;
2674
0
    do
2675
0
      u >>= 1;
2676
0
    while ( (u & 1) == 0);
2677
0
  }
2678
0
      else
2679
0
  {
2680
0
    v -= u;
2681
0
    do
2682
0
      v >>= 1;
2683
0
    while ( (v & 1) == 0);
2684
0
  }
2685
0
    }
2686
0
  return u << shift;
2687
0
}
2688
2689
unsigned long
2690
mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
2691
0
{
2692
0
  mpz_t t;
2693
0
  mpz_init_set_ui(t, v);
2694
0
  mpz_gcd (t, u, t);
2695
0
  if (v > 0)
2696
0
    v = mpz_get_ui (t);
2697
2698
0
  if (g)
2699
0
    mpz_swap (t, g);
2700
2701
0
  mpz_clear (t);
2702
2703
0
  return v;
2704
0
}
2705
2706
static mp_bitcnt_t
2707
mpz_make_odd (mpz_t r)
2708
0
{
2709
0
  mp_bitcnt_t shift;
2710
2711
0
  assert (r->_mp_size > 0);
2712
  /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
2713
0
  shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
2714
0
  mpz_tdiv_q_2exp (r, r, shift);
2715
2716
0
  return shift;
2717
0
}
2718
2719
void
2720
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
2721
0
{
2722
0
  mpz_t tu, tv;
2723
0
  mp_bitcnt_t uz, vz, gz;
2724
2725
0
  if (u->_mp_size == 0)
2726
0
    {
2727
0
      mpz_abs (g, v);
2728
0
      return;
2729
0
    }
2730
0
  if (v->_mp_size == 0)
2731
0
    {
2732
0
      mpz_abs (g, u);
2733
0
      return;
2734
0
    }
2735
2736
0
  mpz_init (tu);
2737
0
  mpz_init (tv);
2738
2739
0
  mpz_abs (tu, u);
2740
0
  uz = mpz_make_odd (tu);
2741
0
  mpz_abs (tv, v);
2742
0
  vz = mpz_make_odd (tv);
2743
0
  gz = GMP_MIN (uz, vz);
2744
2745
0
  if (tu->_mp_size < tv->_mp_size)
2746
0
    mpz_swap (tu, tv);
2747
2748
0
  mpz_tdiv_r (tu, tu, tv);
2749
0
  if (tu->_mp_size == 0)
2750
0
    {
2751
0
      mpz_swap (g, tv);
2752
0
    }
2753
0
  else
2754
0
    for (;;)
2755
0
      {
2756
0
  int c;
2757
2758
0
  mpz_make_odd (tu);
2759
0
  c = mpz_cmp (tu, tv);
2760
0
  if (c == 0)
2761
0
    {
2762
0
      mpz_swap (g, tu);
2763
0
      break;
2764
0
    }
2765
0
  if (c < 0)
2766
0
    mpz_swap (tu, tv);
2767
2768
0
  if (tv->_mp_size == 1)
2769
0
    {
2770
0
      mp_limb_t vl = tv->_mp_d[0];
2771
0
      mp_limb_t ul = mpz_tdiv_ui (tu, vl);
2772
0
      mpz_set_ui (g, mpn_gcd_11 (ul, vl));
2773
0
      break;
2774
0
    }
2775
0
  mpz_sub (tu, tu, tv);
2776
0
      }
2777
0
  mpz_clear (tu);
2778
0
  mpz_clear (tv);
2779
0
  mpz_mul_2exp (g, g, gz);
2780
0
}
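
mpz_gcd strips the common factors of two and then runs a subtraction-based
binary GCD on the odd parts, shifting the power of two back in at the end. A
minimal usage sketch (hypothetical helper, public mini-gmp API only):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_gcd (void)
  {
    mpz_t g, u, v;
    mpz_init (g);
    mpz_init_set_ui (u, 48);           /* 2^4 * 3       */
    mpz_init_set_ui (v, 180);          /* 2^2 * 3^2 * 5 */

    mpz_gcd (g, u, v);
    assert (mpz_cmp_ui (g, 12) == 0);  /* 2^2 * 3 */

    mpz_clear (g); mpz_clear (u); mpz_clear (v);
  }
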
2781
2782
void
2783
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
2784
0
{
2785
0
  mpz_t tu, tv, s0, s1, t0, t1;
2786
0
  mp_bitcnt_t uz, vz, gz;
2787
0
  mp_bitcnt_t power;
2788
2789
0
  if (u->_mp_size == 0)
2790
0
    {
2791
      /* g = 0 u + sgn(v) v */
2792
0
      signed long sign = mpz_sgn (v);
2793
0
      mpz_abs (g, v);
2794
0
      if (s)
2795
0
  s->_mp_size = 0;
2796
0
      if (t)
2797
0
  mpz_set_si (t, sign);
2798
0
      return;
2799
0
    }
2800
2801
0
  if (v->_mp_size == 0)
2802
0
    {
2803
      /* g = sgn(u) u + 0 v */
2804
0
      signed long sign = mpz_sgn (u);
2805
0
      mpz_abs (g, u);
2806
0
      if (s)
2807
0
  mpz_set_si (s, sign);
2808
0
      if (t)
2809
0
  t->_mp_size = 0;
2810
0
      return;
2811
0
    }
2812
2813
0
  mpz_init (tu);
2814
0
  mpz_init (tv);
2815
0
  mpz_init (s0);
2816
0
  mpz_init (s1);
2817
0
  mpz_init (t0);
2818
0
  mpz_init (t1);
2819
2820
0
  mpz_abs (tu, u);
2821
0
  uz = mpz_make_odd (tu);
2822
0
  mpz_abs (tv, v);
2823
0
  vz = mpz_make_odd (tv);
2824
0
  gz = GMP_MIN (uz, vz);
2825
2826
0
  uz -= gz;
2827
0
  vz -= gz;
2828
2829
  /* Cofactors corresponding to odd gcd. gz handled later. */
2830
0
  if (tu->_mp_size < tv->_mp_size)
2831
0
    {
2832
0
      mpz_swap (tu, tv);
2833
0
      MPZ_SRCPTR_SWAP (u, v);
2834
0
      MPZ_PTR_SWAP (s, t);
2835
0
      MP_BITCNT_T_SWAP (uz, vz);
2836
0
    }
2837
2838
  /* Maintain
2839
   *
2840
   * u = t0 tu + t1 tv
2841
   * v = s0 tu + s1 tv
2842
   *
2843
   * where u and v denote the inputs with common factors of two
2844
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
2845
   *
2846
   * 2^p tu =  s1 u - t1 v
2847
   * 2^p tv = -s0 u + t0 v
2848
   */
2849
2850
  /* After initial division, tu = q tv + tu', we have
2851
   *
2852
   * u = 2^uz (tu' + q tv)
2853
   * v = 2^vz tv
2854
   *
2855
   * or
2856
   *
2857
   * t0 = 2^uz, t1 = 2^uz q
2858
   * s0 = 0,    s1 = 2^vz
2859
   */
2860
2861
0
  mpz_setbit (t0, uz);
2862
0
  mpz_tdiv_qr (t1, tu, tu, tv);
2863
0
  mpz_mul_2exp (t1, t1, uz);
2864
2865
0
  mpz_setbit (s1, vz);
2866
0
  power = uz + vz;
2867
2868
0
  if (tu->_mp_size > 0)
2869
0
    {
2870
0
      mp_bitcnt_t shift;
2871
0
      shift = mpz_make_odd (tu);
2872
0
      mpz_mul_2exp (t0, t0, shift);
2873
0
      mpz_mul_2exp (s0, s0, shift);
2874
0
      power += shift;
2875
2876
0
      for (;;)
2877
0
  {
2878
0
    int c;
2879
0
    c = mpz_cmp (tu, tv);
2880
0
    if (c == 0)
2881
0
      break;
2882
2883
0
    if (c < 0)
2884
0
      {
2885
        /* tv = tv' + tu
2886
         *
2887
         * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
2888
         * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
2889
2890
0
        mpz_sub (tv, tv, tu);
2891
0
        mpz_add (t0, t0, t1);
2892
0
        mpz_add (s0, s0, s1);
2893
2894
0
        shift = mpz_make_odd (tv);
2895
0
        mpz_mul_2exp (t1, t1, shift);
2896
0
        mpz_mul_2exp (s1, s1, shift);
2897
0
      }
2898
0
    else
2899
0
      {
2900
0
        mpz_sub (tu, tu, tv);
2901
0
        mpz_add (t1, t0, t1);
2902
0
        mpz_add (s1, s0, s1);
2903
2904
0
        shift = mpz_make_odd (tu);
2905
0
        mpz_mul_2exp (t0, t0, shift);
2906
0
        mpz_mul_2exp (s0, s0, shift);
2907
0
      }
2908
0
    power += shift;
2909
0
  }
2910
0
    }
2911
2912
  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
2913
     cofactors. */
2914
2915
0
  mpz_mul_2exp (tv, tv, gz);
2916
0
  mpz_neg (s0, s0);
2917
2918
  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
2919
     adjust cofactors, we need u / g and v / g */
2920
2921
0
  mpz_divexact (s1, v, tv);
2922
0
  mpz_abs (s1, s1);
2923
0
  mpz_divexact (t1, u, tv);
2924
0
  mpz_abs (t1, t1);
2925
2926
0
  while (power-- > 0)
2927
0
    {
2928
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
2929
0
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
2930
0
  {
2931
0
    mpz_sub (s0, s0, s1);
2932
0
    mpz_add (t0, t0, t1);
2933
0
  }
2934
0
      assert (mpz_even_p (t0) && mpz_even_p (s0));
2935
0
      mpz_tdiv_q_2exp (s0, s0, 1);
2936
0
      mpz_tdiv_q_2exp (t0, t0, 1);
2937
0
    }
2938
2939
  /* Arrange so that |s| < |u| / 2g */
2940
0
  mpz_add (s1, s0, s1);
2941
0
  if (mpz_cmpabs (s0, s1) > 0)
2942
0
    {
2943
0
      mpz_swap (s0, s1);
2944
0
      mpz_sub (t0, t0, t1);
2945
0
    }
2946
0
  if (u->_mp_size < 0)
2947
0
    mpz_neg (s0, s0);
2948
0
  if (v->_mp_size < 0)
2949
0
    mpz_neg (t0, t0);
2950
2951
0
  mpz_swap (g, tv);
2952
0
  if (s)
2953
0
    mpz_swap (s, s0);
2954
0
  if (t)
2955
0
    mpz_swap (t, t0);
2956
2957
0
  mpz_clear (tu);
2958
0
  mpz_clear (tv);
2959
0
  mpz_clear (s0);
2960
0
  mpz_clear (s1);
2961
0
  mpz_clear (t0);
2962
0
  mpz_clear (t1);
2963
0
}
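
The cofactors produced above satisfy the Bezout identity g = s*u + t*v. A small
check of that property (editorial sketch, not part of the library; the
example_* helper is hypothetical):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_gcdext (void)
  {
    mpz_t g, s, t, u, v, chk;
    mpz_init (g); mpz_init (s); mpz_init (t); mpz_init (chk);
    mpz_init_set_ui (u, 240);
    mpz_init_set_ui (v, 46);

    mpz_gcdext (g, s, t, u, v);
    assert (mpz_cmp_ui (g, 2) == 0);   /* gcd (240, 46) = 2 */

    mpz_mul (chk, s, u);               /* chk = s*u         */
    mpz_addmul (chk, t, v);            /* chk = s*u + t*v   */
    assert (mpz_cmp (chk, g) == 0);    /* Bezout identity   */

    mpz_clear (g); mpz_clear (s); mpz_clear (t);
    mpz_clear (u); mpz_clear (v); mpz_clear (chk);
  }
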
2964
2965
void
2966
mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
2967
0
{
2968
0
  mpz_t g;
2969
2970
0
  if (u->_mp_size == 0 || v->_mp_size == 0)
2971
0
    {
2972
0
      r->_mp_size = 0;
2973
0
      return;
2974
0
    }
2975
2976
0
  mpz_init (g);
2977
2978
0
  mpz_gcd (g, u, v);
2979
0
  mpz_divexact (g, u, g);
2980
0
  mpz_mul (r, g, v);
2981
2982
0
  mpz_clear (g);
2983
0
  mpz_abs (r, r);
2984
0
}
2985
2986
void
2987
mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
2988
0
{
2989
0
  if (v == 0 || u->_mp_size == 0)
2990
0
    {
2991
0
      r->_mp_size = 0;
2992
0
      return;
2993
0
    }
2994
2995
0
  v /= mpz_gcd_ui (NULL, u, v);
2996
0
  mpz_mul_ui (r, u, v);
2997
2998
0
  mpz_abs (r, r);
2999
0
}
3000
3001
int
3002
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
3003
0
{
3004
0
  mpz_t g, tr;
3005
0
  int invertible;
3006
3007
0
  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
3008
0
    return 0;
3009
3010
0
  mpz_init (g);
3011
0
  mpz_init (tr);
3012
3013
0
  mpz_gcdext (g, tr, NULL, u, m);
3014
0
  invertible = (mpz_cmp_ui (g, 1) == 0);
3015
3016
0
  if (invertible)
3017
0
    {
3018
0
      if (tr->_mp_size < 0)
3019
0
  {
3020
0
    if (m->_mp_size >= 0)
3021
0
      mpz_add (tr, tr, m);
3022
0
    else
3023
0
      mpz_sub (tr, tr, m);
3024
0
  }
3025
0
      mpz_swap (r, tr);
3026
0
    }
3027
3028
0
  mpz_clear (g);
3029
0
  mpz_clear (tr);
3030
0
  return invertible;
3031
0
}
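
mpz_invert succeeds exactly when gcd(u, m) = 1 and, for positive m, hands back
the inverse reduced into [0, m). A brief usage sketch (hypothetical helper and
values):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_invert (void)
  {
    mpz_t r, u, m;
    mpz_init (r);
    mpz_init_set_ui (u, 3);
    mpz_init_set_ui (m, 7);

    assert (mpz_invert (r, u, m) != 0);  /* 3 is invertible mod 7      */
    assert (mpz_cmp_ui (r, 5) == 0);     /* 3 * 5 = 15 = 1 (mod 7)     */

    mpz_set_ui (u, 6);
    assert (mpz_invert (r, u, m) != 0);  /* 6 * 6 = 36 = 1 (mod 7)     */
    assert (mpz_cmp_ui (r, 6) == 0);

    mpz_clear (r); mpz_clear (u); mpz_clear (m);
  }
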
3032
3033

3034
/* Higher level operations (sqrt, pow and root) */
3035
3036
void
3037
mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
3038
0
{
3039
0
  unsigned long bit;
3040
0
  mpz_t tr;
3041
0
  mpz_init_set_ui (tr, 1);
3042
3043
0
  bit = GMP_ULONG_HIGHBIT;
3044
0
  do
3045
0
    {
3046
0
      mpz_mul (tr, tr, tr);
3047
0
      if (e & bit)
3048
0
  mpz_mul (tr, tr, b);
3049
0
      bit >>= 1;
3050
0
    }
3051
0
  while (bit > 0);
3052
3053
0
  mpz_swap (r, tr);
3054
0
  mpz_clear (tr);
3055
0
}
3056
3057
void
3058
mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
3059
0
{
3060
0
  mpz_t b;
3061
3062
0
  mpz_init_set_ui (b, blimb);
3063
0
  mpz_pow_ui (r, b, e);
3064
0
  mpz_clear (b);
3065
0
}
3066
3067
void
3068
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
3069
0
{
3070
0
  mpz_t tr;
3071
0
  mpz_t base;
3072
0
  mp_size_t en, mn;
3073
0
  mp_srcptr mp;
3074
0
  struct gmp_div_inverse minv;
3075
0
  unsigned shift;
3076
0
  mp_ptr tp = NULL;
3077
3078
0
  en = GMP_ABS (e->_mp_size);
3079
0
  mn = GMP_ABS (m->_mp_size);
3080
0
  if (mn == 0)
3081
0
    gmp_die ("mpz_powm: Zero modulo.");
3082
3083
0
  if (en == 0)
3084
0
    {
3085
0
      mpz_set_ui (r, 1);
3086
0
      return;
3087
0
    }
3088
3089
0
  mp = m->_mp_d;
3090
0
  mpn_div_qr_invert (&minv, mp, mn);
3091
0
  shift = minv.shift;
3092
3093
0
  if (shift > 0)
3094
0
    {
3095
      /* To avoid shifts, we do all our reductions, except the final
3096
   one, using a *normalized* m. */
3097
0
      minv.shift = 0;
3098
3099
0
      tp = gmp_xalloc_limbs (mn);
3100
0
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
3101
0
      mp = tp;
3102
0
    }
3103
3104
0
  mpz_init (base);
3105
3106
0
  if (e->_mp_size < 0)
3107
0
    {
3108
0
      if (!mpz_invert (base, b, m))
3109
0
  gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
3110
0
    }
3111
0
  else
3112
0
    {
3113
0
      mp_size_t bn;
3114
0
      mpz_abs (base, b);
3115
3116
0
      bn = base->_mp_size;
3117
0
      if (bn >= mn)
3118
0
  {
3119
0
    mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
3120
0
    bn = mn;
3121
0
  }
3122
3123
      /* We have reduced the absolute value. Now take care of the
3124
   sign. Note that we get zero represented non-canonically as
3125
   m. */
3126
0
      if (b->_mp_size < 0)
3127
0
  {
3128
0
    mp_ptr bp = MPZ_REALLOC (base, mn);
3129
0
    gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
3130
0
    bn = mn;
3131
0
  }
3132
0
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
3133
0
    }
3134
0
  mpz_init_set_ui (tr, 1);
3135
3136
0
  while (--en >= 0)
3137
0
    {
3138
0
      mp_limb_t w = e->_mp_d[en];
3139
0
      mp_limb_t bit;
3140
3141
0
      bit = GMP_LIMB_HIGHBIT;
3142
0
      do
3143
0
  {
3144
0
    mpz_mul (tr, tr, tr);
3145
0
    if (w & bit)
3146
0
      mpz_mul (tr, tr, base);
3147
0
    if (tr->_mp_size > mn)
3148
0
      {
3149
0
        mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3150
0
        tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3151
0
      }
3152
0
    bit >>= 1;
3153
0
  }
3154
0
      while (bit > 0);
3155
0
    }
3156
3157
  /* Final reduction */
3158
0
  if (tr->_mp_size >= mn)
3159
0
    {
3160
0
      minv.shift = shift;
3161
0
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
3162
0
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
3163
0
    }
3164
0
  if (tp)
3165
0
    gmp_free (tp);
3166
3167
0
  mpz_swap (r, tr);
3168
0
  mpz_clear (tr);
3169
0
  mpz_clear (base);
3170
0
}
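
mpz_powm above performs a left-to-right binary exponentiation, reducing by the
(normalized) modulus after each step. A minimal usage sketch via the
mpz_powm_ui wrapper (editorial example values, not from this file):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_powm (void)
  {
    mpz_t r, b, m;
    mpz_init (r);
    mpz_init_set_ui (b, 4);
    mpz_init_set_ui (m, 497);

    mpz_powm_ui (r, b, 13, m);          /* 4^13 mod 497                   */
    assert (mpz_cmp_ui (r, 445) == 0);  /* 4^13 = 67108864 = 445 (mod 497) */

    mpz_clear (r); mpz_clear (b); mpz_clear (m);
  }
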
3171
3172
void
3173
mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
3174
0
{
3175
0
  mpz_t e;
3176
3177
0
  mpz_init_set_ui (e, elimb);
3178
0
  mpz_powm (r, b, e, m);
3179
0
  mpz_clear (e);
3180
0
}
3181
3182
/* x=trunc(y^(1/z)), r=y-x^z */
3183
void
3184
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
3185
0
{
3186
0
  int sgn;
3187
0
  mpz_t t, u;
3188
3189
0
  sgn = y->_mp_size < 0;
3190
0
  if ((~z & sgn) != 0)
3191
0
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
3192
0
  if (z == 0)
3193
0
    gmp_die ("mpz_rootrem: Zeroth root.");
3194
3195
0
  if (mpz_cmpabs_ui (y, 1) <= 0) {
3196
0
    if (x)
3197
0
      mpz_set (x, y);
3198
0
    if (r)
3199
0
      r->_mp_size = 0;
3200
0
    return;
3201
0
  }
3202
3203
0
  mpz_init (u);
3204
0
  mpz_init (t);
3205
0
  mpz_setbit (t, mpz_sizeinbase (y, 2) / z + 1);
3206
3207
0
  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
3208
0
    do {
3209
0
      mpz_swap (u, t);      /* u = x */
3210
0
      mpz_tdiv_q (t, y, u);   /* t = y/x */
3211
0
      mpz_add (t, t, u);    /* t = y/x + x */
3212
0
      mpz_tdiv_q_2exp (t, t, 1);  /* x'= (y/x + x)/2 */
3213
0
    } while (mpz_cmpabs (t, u) < 0);  /* |x'| < |x| */
3214
0
  else /* z != 2 */ {
3215
0
    mpz_t v;
3216
3217
0
    mpz_init (v);
3218
0
    if (sgn)
3219
0
      mpz_neg (t, t);
3220
3221
0
    do {
3222
0
      mpz_swap (u, t);      /* u = x */
3223
0
      mpz_pow_ui (t, u, z - 1);   /* t = x^(z-1) */
3224
0
      mpz_tdiv_q (t, y, t);   /* t = y/x^(z-1) */
3225
0
      mpz_mul_ui (v, u, z - 1);   /* v = x*(z-1) */
3226
0
      mpz_add (t, t, v);    /* t = y/x^(z-1) + x*(z-1) */
3227
0
      mpz_tdiv_q_ui (t, t, z);    /* x'=(y/x^(z-1) + x*(z-1))/z */
3228
0
    } while (mpz_cmpabs (t, u) < 0);  /* |x'| < |x| */
3229
3230
0
    mpz_clear (v);
3231
0
  }
3232
3233
0
  if (r) {
3234
0
    mpz_pow_ui (t, u, z);
3235
0
    mpz_sub (r, y, t);
3236
0
  }
3237
0
  if (x)
3238
0
    mpz_swap (x, u);
3239
0
  mpz_clear (u);
3240
0
  mpz_clear (t);
3241
0
}
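
The iteration above converges to the truncated z-th root. A short usage sketch,
including the square-root special case (hypothetical helper, public API only):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_rootrem (void)
  {
    mpz_t x, r, y;
    mpz_init (x);
    mpz_init (r);
    mpz_init_set_ui (y, 100);

    mpz_rootrem (x, r, y, 3);   /* trunc (100^(1/3)) = 4, 100 - 4^3 = 36 */
    assert (mpz_cmp_ui (x, 4) == 0 && mpz_cmp_ui (r, 36) == 0);

    mpz_sqrtrem (x, r, y);      /* floor (sqrt (100)) = 10, remainder 0  */
    assert (mpz_cmp_ui (x, 10) == 0 && mpz_cmp_ui (r, 0) == 0);

    mpz_clear (x); mpz_clear (r); mpz_clear (y);
  }
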
3242
3243
int
3244
mpz_root (mpz_t x, const mpz_t y, unsigned long z)
3245
0
{
3246
0
  int res;
3247
0
  mpz_t r;
3248
3249
0
  mpz_init (r);
3250
0
  mpz_rootrem (x, r, y, z);
3251
0
  res = r->_mp_size == 0;
3252
0
  mpz_clear (r);
3253
3254
0
  return res;
3255
0
}
3256
3257
/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
3258
void
3259
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
3260
0
{
3261
0
  mpz_rootrem (s, r, u, 2);
3262
0
}
3263
3264
void
3265
mpz_sqrt (mpz_t s, const mpz_t u)
3266
0
{
3267
0
  mpz_rootrem (s, NULL, u, 2);
3268
0
}
3269
3270
int
3271
mpz_perfect_square_p (const mpz_t u)
3272
0
{
3273
0
  if (u->_mp_size <= 0)
3274
0
    return (u->_mp_size == 0);
3275
0
  else
3276
0
    return mpz_root (NULL, u, 2);
3277
0
}
3278
3279
int
3280
mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
3281
0
{
3282
0
  mpz_t t;
3283
3284
0
  assert (n > 0);
3285
0
  assert (p [n-1] != 0);
3286
0
  return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2);
3287
0
}
3288
3289
mp_size_t
3290
mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
3291
0
{
3292
0
  mpz_t s, r, u;
3293
0
  mp_size_t res;
3294
3295
0
  assert (n > 0);
3296
0
  assert (p [n-1] != 0);
3297
3298
0
  mpz_init (r);
3299
0
  mpz_init (s);
3300
0
  mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2);
3301
3302
0
  assert (s->_mp_size == (n+1)/2);
3303
0
  mpn_copyd (sp, s->_mp_d, s->_mp_size);
3304
0
  mpz_clear (s);
3305
0
  res = r->_mp_size;
3306
0
  if (rp)
3307
0
    mpn_copyd (rp, r->_mp_d, res);
3308
0
  mpz_clear (r);
3309
0
  return res;
3310
0
}
3311

3312
/* Combinatorics */
3313
3314
void
3315
mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m)
3316
0
{
3317
0
  mpz_set_ui (x, n + (n == 0));
3318
0
  if (m + 1 < 2) return;
3319
0
  while (n > m + 1)
3320
0
    mpz_mul_ui (x, x, n -= m);
3321
0
}
3322
3323
void
3324
mpz_2fac_ui (mpz_t x, unsigned long n)
3325
0
{
3326
0
  mpz_mfac_uiui (x, n, 2);
3327
0
}
3328
3329
void
3330
mpz_fac_ui (mpz_t x, unsigned long n)
3331
0
{
3332
0
  mpz_mfac_uiui (x, n, 1);
3333
0
}
3334
3335
void
3336
mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
3337
0
{
3338
0
  mpz_t t;
3339
3340
0
  mpz_set_ui (r, k <= n);
3341
3342
0
  if (k > (n >> 1))
3343
0
    k = (k <= n) ? n - k : 0;
3344
3345
0
  mpz_init (t);
3346
0
  mpz_fac_ui (t, k);
3347
3348
0
  for (; k > 0; --k)
3349
0
    mpz_mul_ui (r, r, n--);
3350
3351
0
  mpz_divexact (r, r, t);
3352
0
  mpz_clear (t);
3353
0
}
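
mpz_bin_uiui computes C(n,k) as a falling factorial divided by k!, after
applying the k > n/2 symmetry. A short sketch of the combinatorics entry points
(hypothetical values, not from this file):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_combinatorics (void)
  {
    mpz_t r;
    mpz_init (r);

    mpz_fac_ui (r, 5);                  /* 5! = 120                  */
    assert (mpz_cmp_ui (r, 120) == 0);

    mpz_2fac_ui (r, 9);                 /* 9!! = 9*7*5*3*1 = 945     */
    assert (mpz_cmp_ui (r, 945) == 0);

    mpz_bin_uiui (r, 10, 3);            /* C(10,3) = 120             */
    assert (mpz_cmp_ui (r, 120) == 0);

    mpz_clear (r);
  }
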
3354
3355

3356
/* Primality testing */
3357
3358
/* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
3359
/* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
3360
static int
3361
gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b)
3362
0
{
3363
0
  int c, bit = 0;
3364
3365
0
  assert (b & 1);
3366
0
  assert (a != 0);
3367
  /* assert (mpn_gcd_11 (a, b) == 1); */
3368
3369
  /* Below, we represent a and b shifted right so that the least
3370
     significant one bit is implicit. */
3371
0
  b >>= 1;
3372
3373
0
  gmp_ctz(c, a);
3374
0
  a >>= 1;
3375
3376
0
  for (;;)
3377
0
    {
3378
0
      a >>= c;
3379
      /* (2/b) = -1 if b = 3 or 5 mod 8 */
3380
0
      bit ^= c & (b ^ (b >> 1));
3381
0
      if (a < b)
3382
0
  {
3383
0
    if (a == 0)
3384
0
      return bit & 1 ? -1 : 1;
3385
0
    bit ^= a & b;
3386
0
    a = b - a;
3387
0
    b -= a;
3388
0
  }
3389
0
      else
3390
0
  {
3391
0
    a -= b;
3392
0
    assert (a != 0);
3393
0
  }
3394
3395
0
      gmp_ctz(c, a);
3396
0
      ++c;
3397
0
    }
3398
0
}
3399
3400
static void
3401
gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n)
3402
0
{
3403
0
  mpz_mod (Qk, Qk, n);
3404
  /* V_{2k} <- V_k ^ 2 - 2Q^k */
3405
0
  mpz_mul (V, V, V);
3406
0
  mpz_submul_ui (V, Qk, 2);
3407
0
  mpz_tdiv_r (V, V, n);
3408
  /* Q^{2k} = (Q^k)^2 */
3409
0
  mpz_mul (Qk, Qk, Qk);
3410
0
}
3411
3412
/* Computes V_k, Q^k (mod n) for the Lucas' sequence */
3413
/* with P=1, Q=Q; k = (n>>b0)|1. */
3414
/* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
3415
/* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
3416
static int
3417
gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q,
3418
         mp_bitcnt_t b0, const mpz_t n)
3419
0
{
3420
0
  mp_bitcnt_t bs;
3421
0
  mpz_t U;
3422
0
  int res;
3423
3424
0
  assert (b0 > 0);
3425
0
  assert (Q <= - (LONG_MIN / 2));
3426
0
  assert (Q >= - (LONG_MAX / 2));
3427
0
  assert (mpz_cmp_ui (n, 4) > 0);
3428
0
  assert (mpz_odd_p (n));
3429
3430
0
  mpz_init_set_ui (U, 1); /* U1 = 1 */
3431
0
  mpz_set_ui (V, 1); /* V1 = 1 */
3432
0
  mpz_set_si (Qk, Q);
3433
3434
0
  for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;)
3435
0
    {
3436
      /* U_{2k} <- U_k * V_k */
3437
0
      mpz_mul (U, U, V);
3438
      /* V_{2k} <- V_k ^ 2 - 2Q^k */
3439
      /* Q^{2k} = (Q^k)^2 */
3440
0
      gmp_lucas_step_k_2k (V, Qk, n);
3441
3442
      /* A step k->k+1 is performed if the bit in $n$ is 1  */
3443
      /* mpz_tstbit(n,bs) or the bit is 0 in $n$ but  */
3444
      /* should be 1 in $n+1$ (bs == b0)      */
3445
0
      if (b0 == bs || mpz_tstbit (n, bs))
3446
0
  {
3447
    /* Q^{k+1} <- Q^k * Q */
3448
0
    mpz_mul_si (Qk, Qk, Q);
3449
    /* U_{k+1} <- (U_k + V_k) / 2 */
3450
0
    mpz_swap (U, V); /* Keep in V the old value of U_k */
3451
0
    mpz_add (U, U, V);
3452
    /* We have to compute U/2, so we need an even value, */
3453
    /* equivalent (mod n) */
3454
0
    if (mpz_odd_p (U))
3455
0
      mpz_add (U, U, n);
3456
0
    mpz_tdiv_q_2exp (U, U, 1);
3457
    /* V_{k+1} <-(D*U_k + V_k) / 2 =
3458
      U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
3459
0
    mpz_mul_si (V, V, -2*Q);
3460
0
    mpz_add (V, U, V);
3461
0
    mpz_tdiv_r (V, V, n);
3462
0
  }
3463
0
      mpz_tdiv_r (U, U, n);
3464
0
    }
3465
3466
0
  res = U->_mp_size == 0;
3467
0
  mpz_clear (U);
3468
0
  return res;
3469
0
}
3470
3471
/* Performs strong Lucas' test on x, with parameters suggested */
3472
/* for the BPSW test. Qk is only passed to recycle a variable. */
3473
/* Requires GCD (x,6) = 1.*/
3474
static int
3475
gmp_stronglucas (const mpz_t x, mpz_t Qk)
3476
0
{
3477
0
  mp_bitcnt_t b0;
3478
0
  mpz_t V, n;
3479
0
  mp_limb_t maxD, D; /* The absolute value is stored. */
3480
0
  long Q;
3481
0
  mp_limb_t tl;
3482
3483
  /* Test on the absolute value. */
3484
0
  mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size));
3485
3486
0
  assert (mpz_odd_p (n));
3487
  /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
3488
0
  if (mpz_root (Qk, n, 2))
3489
0
    return 0; /* A square is composite. */
3490
3491
  /* Check Ds up to square root (in case, n is prime)
3492
     or avoid overflows */
3493
0
  maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX;
3494
3495
0
  D = 3;
3496
  /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
3497
  /* For those Ds we have (D/n) = (n/|D|) */
3498
0
  do
3499
0
    {
3500
0
      if (D >= maxD)
3501
0
  return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */
3502
0
      D += 2;
3503
0
      tl = mpz_tdiv_ui (n, D);
3504
0
      if (tl == 0)
3505
0
  return 0;
3506
0
    }
3507
0
  while (gmp_jacobi_coprime (tl, D) == 1);
3508
3509
0
  mpz_init (V);
3510
3511
  /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
3512
0
  b0 = mpz_scan0 (n, 0);
3513
3514
  /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
3515
0
  Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2);
3516
3517
0
  if (! gmp_lucas_mod (V, Qk, Q, b0, n)) /* If Ud != 0 */
3518
0
    while (V->_mp_size != 0 && --b0 != 0) /* while Vk != 0 */
3519
      /* V <- V ^ 2 - 2Q^k */
3520
      /* Q^{2k} = (Q^k)^2 */
3521
0
      gmp_lucas_step_k_2k (V, Qk, n);
3522
3523
0
  mpz_clear (V);
3524
0
  return (b0 != 0);
3525
0
}
3526
3527
static int
3528
gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
3529
     const mpz_t q, mp_bitcnt_t k)
3530
0
{
3531
0
  assert (k > 0);
3532
3533
  /* Caller must initialize y to the base. */
3534
0
  mpz_powm (y, y, q, n);
3535
3536
0
  if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
3537
0
    return 1;
3538
3539
0
  while (--k > 0)
3540
0
    {
3541
0
      mpz_powm_ui (y, y, 2, n);
3542
0
      if (mpz_cmp (y, nm1) == 0)
3543
0
  return 1;
3544
      /* y == 1 means that the previous y was a non-trivial square root
3545
   of 1 (mod n). y == 0 means that n is a power of the base.
3546
   In either case, n is not prime. */
3547
0
      if (mpz_cmp_ui (y, 1) <= 0)
3548
0
  return 0;
3549
0
    }
3550
0
  return 0;
3551
0
}
3552
3553
/* This product is 0xc0cfd797, and fits in 32 bits. */
3554
#define GMP_PRIME_PRODUCT \
3555
0
  (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3556
3557
/* Bit (p+1)/2 is set, for each odd prime <= 61 */
3558
0
#define GMP_PRIME_MASK 0xc96996dcUL
3559
3560
int
3561
mpz_probab_prime_p (const mpz_t n, int reps)
3562
0
{
3563
0
  mpz_t nm1;
3564
0
  mpz_t q;
3565
0
  mpz_t y;
3566
0
  mp_bitcnt_t k;
3567
0
  int is_prime;
3568
0
  int j;
3569
3570
  /* Note that we use the absolute value of n only, for compatibility
3571
     with the real GMP. */
3572
0
  if (mpz_even_p (n))
3573
0
    return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;
3574
3575
  /* Above test excludes n == 0 */
3576
0
  assert (n->_mp_size != 0);
3577
3578
0
  if (mpz_cmpabs_ui (n, 64) < 0)
3579
0
    return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;
3580
3581
0
  if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
3582
0
    return 0;
3583
3584
  /* All prime factors are >= 31. */
3585
0
  if (mpz_cmpabs_ui (n, 31*31) < 0)
3586
0
    return 2;
3587
3588
0
  mpz_init (nm1);
3589
0
  mpz_init (q);
3590
3591
  /* Find q and k, where q is odd and n = 1 + 2**k * q.  */
3592
0
  mpz_abs (nm1, n);
3593
0
  nm1->_mp_d[0] -= 1;
3594
0
  k = mpz_scan1 (nm1, 0);
3595
0
  mpz_tdiv_q_2exp (q, nm1, k);
3596
3597
  /* BPSW test */
3598
0
  mpz_init_set_ui (y, 2);
3599
0
  is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y);
3600
0
  reps -= 24; /* skip the first 24 repetitions */
3601
3602
  /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
3603
     j^2 + j + 41 using Euler's polynomial. We potentially stop early,
3604
     if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
3605
     30 (a[30] == 971 > 31*31 == 961). */
3606
3607
0
  for (j = 0; is_prime & (j < reps); j++)
3608
0
    {
3609
0
      mpz_set_ui (y, (unsigned long) j*j+j+41);
3610
0
      if (mpz_cmp (y, nm1) >= 0)
3611
0
  {
3612
    /* Don't try any further bases. This "early" break does not affect
3613
       the result for any reasonable reps value (<=5000 was tested) */
3614
0
    assert (j >= 30);
3615
0
    break;
3616
0
  }
3617
0
      is_prime = gmp_millerrabin (n, nm1, y, q, k);
3618
0
    }
3619
0
  mpz_clear (nm1);
3620
0
  mpz_clear (q);
3621
0
  mpz_clear (y);
3622
3623
0
  return is_prime;
3624
0
}
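
As in GMP, the return value is 2 for small n that are verified exactly, 1 for
probable primes after the BPSW test plus the extra Miller-Rabin rounds, and 0
for composites. A minimal usage sketch (hypothetical helper and values):

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_primality (void)
  {
    mpz_t n;
    mpz_init_set_ui (n, 104729);             /* the 10000th prime        */
    assert (mpz_probab_prime_p (n, 25) != 0);

    mpz_set_ui (n, 314187);                  /* 3 * 104729, composite    */
    assert (mpz_probab_prime_p (n, 25) == 0);

    mpz_clear (n);
  }
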
3625
3626

3627
/* Logical operations and bit manipulation. */
3628
3629
/* Numbers are treated as if represented in two's complement (and
3630
   infinitely sign extended). For negative values we get the two's
3631
   complement from -x = ~x + 1, where ~ is bitwise complement.
3632
   Negation transforms
3633
3634
     xxxx10...0
3635
3636
   into
3637
3638
     yyyy10...0
3639
3640
   where yyyy is the bitwise complement of xxxx. So least significant
3641
   bits, up to and including the first one bit, are unchanged, and
3642
   the more significant bits are all complemented.
3643
3644
   To change a bit from zero to one in a negative number, subtract the
3645
   corresponding power of two from the absolute value. This can never
3646
   underflow. To change a bit from one to zero, add the corresponding
3647
   power of two, and this might overflow. E.g., if x = -001111, the
3648
   two's complement is 110001. Clearing the least significant bit, we
3649
   get two's complement 110000, and -010000. */
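
A concrete run of the example from the comment above (editorial sketch, not
part of the file): x = -15 is ...110001 in two's complement, so bit 0 is set,
clearing it gives -16, and bits beyond the stored limbs read as 1 by sign
extension.

  #include <assert.h>
  #include "mini-gmp.h"

  static void
  example_negative_bits (void)
  {
    mpz_t x;
    mpz_init_set_si (x, -15);          /* two's complement ...11110001 */

    assert (mpz_tstbit (x, 0) == 1);
    assert (mpz_tstbit (x, 1) == 0);
    assert (mpz_tstbit (x, 200) == 1); /* infinite sign extension      */

    mpz_clrbit (x, 0);                 /* ...11110000, i.e. -16        */
    assert (mpz_cmp_si (x, -16) == 0);

    mpz_setbit (x, 0);                 /* back to -15                  */
    assert (mpz_cmp_si (x, -15) == 0);

    mpz_clear (x);
  }
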
3650
3651
int
3652
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
3653
0
{
3654
0
  mp_size_t limb_index;
3655
0
  unsigned shift;
3656
0
  mp_size_t ds;
3657
0
  mp_size_t dn;
3658
0
  mp_limb_t w;
3659
0
  int bit;
3660
3661
0
  ds = d->_mp_size;
3662
0
  dn = GMP_ABS (ds);
3663
0
  limb_index = bit_index / GMP_LIMB_BITS;
3664
0
  if (limb_index >= dn)
3665
0
    return ds < 0;
3666
3667
0
  shift = bit_index % GMP_LIMB_BITS;
3668
0
  w = d->_mp_d[limb_index];
3669
0
  bit = (w >> shift) & 1;
3670
3671
0
  if (ds < 0)
3672
0
    {
3673
      /* d < 0. Check if any of the bits below is set: If so, our bit
3674
   must be complemented. */
3675
0
      if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0)
3676
0
  return bit ^ 1;
3677
0
      while (--limb_index >= 0)
3678
0
  if (d->_mp_d[limb_index] > 0)
3679
0
    return bit ^ 1;
3680
0
    }
3681
0
  return bit;
3682
0
}
3683
3684
static void
3685
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
3686
0
{
3687
0
  mp_size_t dn, limb_index;
3688
0
  mp_limb_t bit;
3689
0
  mp_ptr dp;
3690
3691
0
  dn = GMP_ABS (d->_mp_size);
3692
3693
0
  limb_index = bit_index / GMP_LIMB_BITS;
3694
0
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3695
3696
0
  if (limb_index >= dn)
3697
0
    {
3698
0
      mp_size_t i;
3699
      /* The bit should be set outside of the end of the number.
3700
   We have to increase the size of the number. */
3701
0
      dp = MPZ_REALLOC (d, limb_index + 1);
3702
3703
0
      dp[limb_index] = bit;
3704
0
      for (i = dn; i < limb_index; i++)
3705
0
  dp[i] = 0;
3706
0
      dn = limb_index + 1;
3707
0
    }
3708
0
  else
3709
0
    {
3710
0
      mp_limb_t cy;
3711
3712
0
      dp = d->_mp_d;
3713
3714
0
      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
3715
0
      if (cy > 0)
3716
0
  {
3717
0
    dp = MPZ_REALLOC (d, dn + 1);
3718
0
    dp[dn++] = cy;
3719
0
  }
3720
0
    }
3721
3722
0
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3723
0
}
3724
3725
static void
3726
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
3727
0
{
3728
0
  mp_size_t dn, limb_index;
3729
0
  mp_ptr dp;
3730
0
  mp_limb_t bit;
3731
3732
0
  dn = GMP_ABS (d->_mp_size);
3733
0
  dp = d->_mp_d;
3734
3735
0
  limb_index = bit_index / GMP_LIMB_BITS;
3736
0
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
3737
3738
0
  assert (limb_index < dn);
3739
3740
0
  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
3741
0
         dn - limb_index, bit));
3742
0
  dn = mpn_normalized_size (dp, dn);
3743
0
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
3744
0
}
3745
3746
void
3747
mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3748
0
{
3749
0
  if (!mpz_tstbit (d, bit_index))
3750
0
    {
3751
0
      if (d->_mp_size >= 0)
3752
0
  mpz_abs_add_bit (d, bit_index);
3753
0
      else
3754
0
  mpz_abs_sub_bit (d, bit_index);
3755
0
    }
3756
0
}
3757
3758
void
3759
mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3760
0
{
3761
0
  if (mpz_tstbit (d, bit_index))
3762
0
    {
3763
0
      if (d->_mp_size >= 0)
3764
0
  mpz_abs_sub_bit (d, bit_index);
3765
0
      else
3766
0
  mpz_abs_add_bit (d, bit_index);
3767
0
    }
3768
0
}
3769
3770
void
3771
mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3772
0
{
3773
0
  if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3774
0
    mpz_abs_sub_bit (d, bit_index);
3775
0
  else
3776
0
    mpz_abs_add_bit (d, bit_index);
3777
0
}
3778
3779
void
3780
mpz_com (mpz_t r, const mpz_t u)
3781
0
{
3782
0
  mpz_add_ui (r, u, 1);
3783
0
  mpz_neg (r, r);
3784
0
}
3785
3786
void
3787
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
3788
0
{
3789
0
  mp_size_t un, vn, rn, i;
3790
0
  mp_ptr up, vp, rp;
3791
3792
0
  mp_limb_t ux, vx, rx;
3793
0
  mp_limb_t uc, vc, rc;
3794
0
  mp_limb_t ul, vl, rl;
3795
3796
0
  un = GMP_ABS (u->_mp_size);
3797
0
  vn = GMP_ABS (v->_mp_size);
3798
0
  if (un < vn)
3799
0
    {
3800
0
      MPZ_SRCPTR_SWAP (u, v);
3801
0
      MP_SIZE_T_SWAP (un, vn);
3802
0
    }
3803
0
  if (vn == 0)
3804
0
    {
3805
0
      r->_mp_size = 0;
3806
0
      return;
3807
0
    }
3808
3809
0
  uc = u->_mp_size < 0;
3810
0
  vc = v->_mp_size < 0;
3811
0
  rc = uc & vc;
3812
3813
0
  ux = -uc;
3814
0
  vx = -vc;
3815
0
  rx = -rc;
3816
3817
  /* If the smaller input is positive, higher limbs don't matter. */
3818
0
  rn = vx ? un : vn;
3819
3820
0
  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
3821
3822
0
  up = u->_mp_d;
3823
0
  vp = v->_mp_d;
3824
3825
0
  i = 0;
3826
0
  do
3827
0
    {
3828
0
      ul = (up[i] ^ ux) + uc;
3829
0
      uc = ul < uc;
3830
3831
0
      vl = (vp[i] ^ vx) + vc;
3832
0
      vc = vl < vc;
3833
3834
0
      rl = ( (ul & vl) ^ rx) + rc;
3835
0
      rc = rl < rc;
3836
0
      rp[i] = rl;
3837
0
    }
3838
0
  while (++i < vn);
3839
0
  assert (vc == 0);
3840
3841
0
  for (; i < rn; i++)
3842
0
    {
3843
0
      ul = (up[i] ^ ux) + uc;
3844
0
      uc = ul < uc;
3845
3846
0
      rl = ( (ul & vx) ^ rx) + rc;
3847
0
      rc = rl < rc;
3848
0
      rp[i] = rl;
3849
0
    }
3850
0
  if (rc)
3851
0
    rp[rn++] = rc;
3852
0
  else
3853
0
    rn = mpn_normalized_size (rp, rn);
3854
3855
0
  r->_mp_size = rx ? -rn : rn;
3856
0
}
3857
3858
void
3859
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
3860
0
{
3861
0
  mp_size_t un, vn, rn, i;
3862
0
  mp_ptr up, vp, rp;
3863
3864
0
  mp_limb_t ux, vx, rx;
3865
0
  mp_limb_t uc, vc, rc;
3866
0
  mp_limb_t ul, vl, rl;
3867
3868
0
  un = GMP_ABS (u->_mp_size);
3869
0
  vn = GMP_ABS (v->_mp_size);
3870
0
  if (un < vn)
3871
0
    {
3872
0
      MPZ_SRCPTR_SWAP (u, v);
3873
0
      MP_SIZE_T_SWAP (un, vn);
3874
0
    }
3875
0
  if (vn == 0)
3876
0
    {
3877
0
      mpz_set (r, u);
3878
0
      return;
3879
0
    }
3880
3881
0
  uc = u->_mp_size < 0;
3882
0
  vc = v->_mp_size < 0;
3883
0
  rc = uc | vc;
3884
3885
0
  ux = -uc;
3886
0
  vx = -vc;
3887
0
  rx = -rc;
3888
3889
  /* If the smaller input is negative, by sign extension higher limbs
3890
     don't matter. */
3891
0
  rn = vx ? vn : un;
3892
3893
0
  rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
3894
3895
0
  up = u->_mp_d;
3896
0
  vp = v->_mp_d;
3897
3898
0
  i = 0;
3899
0
  do
3900
0
    {
3901
0
      ul = (up[i] ^ ux) + uc;
3902
0
      uc = ul < uc;
3903
3904
0
      vl = (vp[i] ^ vx) + vc;
3905
0
      vc = vl < vc;
3906
3907
0
      rl = ( (ul | vl) ^ rx) + rc;
3908
0
      rc = rl < rc;
3909
0
      rp[i] = rl;
3910
0
    }
3911
0
  while (++i < vn);
3912
0
  assert (vc == 0);
3913
3914
0
  for (; i < rn; i++)
3915
0
    {
3916
0
      ul = (up[i] ^ ux) + uc;
3917
0
      uc = ul < uc;
3918
3919
0
      rl = ( (ul | vx) ^ rx) + rc;
3920
0
      rc = rl < rc;
3921
0
      rp[i] = rl;
3922
0
    }
3923
0
  if (rc)
3924
0
    rp[rn++] = rc;
3925
0
  else
3926
0
    rn = mpn_normalized_size (rp, rn);
3927
3928
0
  r->_mp_size = rx ? -rn : rn;
3929
0
}
3930
3931
void
3932
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
3933
0
{
3934
0
  mp_size_t un, vn, i;
3935
0
  mp_ptr up, vp, rp;
3936
3937
0
  mp_limb_t ux, vx, rx;
3938
0
  mp_limb_t uc, vc, rc;
3939
0
  mp_limb_t ul, vl, rl;
3940
3941
0
  un = GMP_ABS (u->_mp_size);
3942
0
  vn = GMP_ABS (v->_mp_size);
3943
0
  if (un < vn)
3944
0
    {
3945
0
      MPZ_SRCPTR_SWAP (u, v);
3946
0
      MP_SIZE_T_SWAP (un, vn);
3947
0
    }
3948
0
  if (vn == 0)
3949
0
    {
3950
0
      mpz_set (r, u);
3951
0
      return;
3952
0
    }
3953
3954
0
  uc = u->_mp_size < 0;
3955
0
  vc = v->_mp_size < 0;
3956
0
  rc = uc ^ vc;
3957
3958
0
  ux = -uc;
3959
0
  vx = -vc;
3960
0
  rx = -rc;
3961
3962
0
  rp = MPZ_REALLOC (r, un + (mp_size_t) rc);
3963
3964
0
  up = u->_mp_d;
3965
0
  vp = v->_mp_d;
3966
3967
0
  i = 0;
3968
0
  do
3969
0
    {
3970
0
      ul = (up[i] ^ ux) + uc;
3971
0
      uc = ul < uc;
3972
3973
0
      vl = (vp[i] ^ vx) + vc;
3974
0
      vc = vl < vc;
3975
3976
0
      rl = (ul ^ vl ^ rx) + rc;
3977
0
      rc = rl < rc;
3978
0
      rp[i] = rl;
3979
0
    }
3980
0
  while (++i < vn);
3981
0
  assert (vc == 0);
3982
3983
0
  for (; i < un; i++)
3984
0
    {
3985
0
      ul = (up[i] ^ ux) + uc;
3986
0
      uc = ul < uc;
3987
3988
0
      rl = (ul ^ ux) + rc;
3989
0
      rc = rl < rc;
3990
0
      rp[i] = rl;
3991
0
    }
3992
0
  if (rc)
3993
0
    rp[un++] = rc;
3994
0
  else
3995
0
    un = mpn_normalized_size (rp, un);
3996
3997
0
  r->_mp_size = rx ? -un : un;
3998
0
}
3999
4000
static unsigned
4001
gmp_popcount_limb (mp_limb_t x)
4002
0
{
4003
0
  unsigned c;
4004
4005
  /* Do 16 bits at a time, to avoid limb-sized constants. */
4006
0
  int LOCAL_SHIFT_BITS = 16;
4007
0
  for (c = 0; x > 0;)
4008
0
    {
4009
0
      unsigned w = x - ((x >> 1) & 0x5555);
4010
0
      w = ((w >> 2) & 0x3333) + (w & 0x3333);
4011
0
      w =  (w >> 4) + w;
4012
0
      w = ((w >> 8) & 0x000f) + (w & 0x000f);
4013
0
      c += w;
4014
0
      if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)
4015
0
  x >>= LOCAL_SHIFT_BITS;
4016
0
      else
4017
0
  x = 0;
4018
0
    }
4019
0
  return c;
4020
0
}
4021
4022
mp_bitcnt_t
4023
mpn_popcount (mp_srcptr p, mp_size_t n)
4024
0
{
4025
0
  mp_size_t i;
4026
0
  mp_bitcnt_t c;
4027
4028
0
  for (c = 0, i = 0; i < n; i++)
4029
0
    c += gmp_popcount_limb (p[i]);
4030
4031
0
  return c;
4032
0
}
4033
4034
mp_bitcnt_t
4035
mpz_popcount (const mpz_t u)
4036
0
{
4037
0
  mp_size_t un;
4038
4039
0
  un = u->_mp_size;
4040
4041
0
  if (un < 0)
4042
0
    return ~(mp_bitcnt_t) 0;
4043
4044
0
  return mpn_popcount (u->_mp_d, un);
4045
0
}
4046
4047
mp_bitcnt_t
4048
mpz_hamdist (const mpz_t u, const mpz_t v)
4049
0
{
4050
0
  mp_size_t un, vn, i;
4051
0
  mp_limb_t uc, vc, ul, vl, comp;
4052
0
  mp_srcptr up, vp;
4053
0
  mp_bitcnt_t c;
4054
4055
0
  un = u->_mp_size;
4056
0
  vn = v->_mp_size;
4057
4058
0
  if ( (un ^ vn) < 0)
4059
0
    return ~(mp_bitcnt_t) 0;
4060
4061
0
  comp = - (uc = vc = (un < 0));
4062
0
  if (uc)
4063
0
    {
4064
0
      assert (vn < 0);
4065
0
      un = -un;
4066
0
      vn = -vn;
4067
0
    }
4068
4069
0
  up = u->_mp_d;
4070
0
  vp = v->_mp_d;
4071
4072
0
  if (un < vn)
4073
0
    MPN_SRCPTR_SWAP (up, un, vp, vn);
4074
4075
0
  for (i = 0, c = 0; i < vn; i++)
4076
0
    {
4077
0
      ul = (up[i] ^ comp) + uc;
4078
0
      uc = ul < uc;
4079
4080
0
      vl = (vp[i] ^ comp) + vc;
4081
0
      vc = vl < vc;
4082
4083
0
      c += gmp_popcount_limb (ul ^ vl);
4084
0
    }
4085
0
  assert (vc == 0);
4086
4087
0
  for (; i < un; i++)
4088
0
    {
4089
0
      ul = (up[i] ^ comp) + uc;
4090
0
      uc = ul < uc;
4091
4092
0
      c += gmp_popcount_limb (ul ^ comp);
4093
0
    }
4094
4095
0
  return c;
4096
0
}
4097
4098
mp_bitcnt_t
4099
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
4100
0
{
4101
0
  mp_ptr up;
4102
0
  mp_size_t us, un, i;
4103
0
  mp_limb_t limb, ux;
4104
4105
0
  us = u->_mp_size;
4106
0
  un = GMP_ABS (us);
4107
0
  i = starting_bit / GMP_LIMB_BITS;
4108
4109
  /* Past the end there are no 1 bits for u>=0, or an immediate 1 bit
4110
     for u<0. Notice this test picks up any u==0 too. */
4111
0
  if (i >= un)
4112
0
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);
4113
4114
0
  up = u->_mp_d;
4115
0
  ux = 0;
4116
0
  limb = up[i];
4117
4118
0
  if (starting_bit != 0)
4119
0
    {
4120
0
      if (us < 0)
4121
0
  {
4122
0
    ux = mpn_zero_p (up, i);
4123
0
    limb = ~ limb + ux;
4124
0
    ux = - (mp_limb_t) (limb >= ux);
4125
0
  }
4126
4127
      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
4128
0
      limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
4129
0
    }
4130
4131
0
  return mpn_common_scan (limb, i, up, un, ux);
4132
0
}
4133
4134
mp_bitcnt_t
4135
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
4136
0
{
4137
0
  mp_ptr up;
4138
0
  mp_size_t us, un, i;
4139
0
  mp_limb_t limb, ux;
4140
4141
0
  us = u->_mp_size;
4142
0
  ux = - (mp_limb_t) (us >= 0);
4143
0
  un = GMP_ABS (us);
4144
0
  i = starting_bit / GMP_LIMB_BITS;
4145
4146
  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
4147
     u<0.  Notice this test picks up all cases of u==0 too. */
4148
0
  if (i >= un)
4149
0
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);
4150
4151
0
  up = u->_mp_d;
4152
0
  limb = up[i] ^ ux;
4153
4154
0
  if (ux == 0)
4155
0
    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */
4156
4157
  /* Mask all bits before starting_bit, thus ignoring them. */
4158
0
  limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
4159
4160
0
  return mpn_common_scan (limb, i, up, un, ux);
4161
0
}
4162
4163

4164
/* MPZ base conversion. */
4165
4166
size_t
4167
mpz_sizeinbase (const mpz_t u, int base)
4168
3.52k
{
4169
3.52k
  mp_size_t un;
4170
3.52k
  mp_srcptr up;
4171
3.52k
  mp_ptr tp;
4172
3.52k
  mp_bitcnt_t bits;
4173
3.52k
  struct gmp_div_inverse bi;
4174
3.52k
  size_t ndigits;
4175
4176
3.52k
  assert (base >= 2);
4177
3.52k
  assert (base <= 62);
4178
4179
3.52k
  un = GMP_ABS (u->_mp_size);
4180
3.52k
  if (un == 0)
4181
0
    return 1;
4182
4183
3.52k
  up = u->_mp_d;
4184
4185
3.52k
  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
4186
3.52k
  switch (base)
4187
3.52k
    {
4188
0
    case 2:
4189
0
      return bits;
4190
0
    case 4:
4191
0
      return (bits + 1) / 2;
4192
0
    case 8:
4193
0
      return (bits + 2) / 3;
4194
0
    case 16:
4195
0
      return (bits + 3) / 4;
4196
0
    case 32:
4197
0
      return (bits + 4) / 5;
4198
      /* FIXME: Do something more clever for the common case of base
4199
   10. */
4200
3.52k
    }
4201
4202
3.52k
  tp = gmp_xalloc_limbs (un);
4203
3.52k
  mpn_copyi (tp, up, un);
4204
3.52k
  mpn_div_qr_1_invert (&bi, base);
4205
4206
3.52k
  ndigits = 0;
4207
3.52k
  do
4208
326k
    {
4209
326k
      ndigits++;
4210
326k
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
4211
326k
      un -= (tp[un-1] == 0);
4212
326k
    }
4213
326k
  while (un > 0);
4214
4215
3.52k
  gmp_free (tp);
4216
3.52k
  return ndigits;
4217
3.52k
}
4218
4219
char *
4220
mpz_get_str (char *sp, int base, const mpz_t u)
4221
3.52k
{
4222
3.52k
  unsigned bits;
4223
3.52k
  const char *digits;
4224
3.52k
  mp_size_t un;
4225
3.52k
  size_t i, sn;
4226
4227
3.52k
  digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
4228
3.52k
  if (base > 1)
4229
3.52k
    {
4230
3.52k
      if (base <= 36)
4231
3.52k
  digits = "0123456789abcdefghijklmnopqrstuvwxyz";
4232
0
      else if (base > 62)
4233
0
  return NULL;
4234
3.52k
    }
4235
0
  else if (base >= -1)
4236
0
    base = 10;
4237
0
  else
4238
0
    {
4239
0
      base = -base;
4240
0
      if (base > 36)
4241
0
  return NULL;
4242
0
    }
4243
4244
3.52k
  sn = 1 + mpz_sizeinbase (u, base);
4245
3.52k
  if (!sp)
4246
3.52k
    sp = (char *) gmp_xalloc (1 + sn);
4247
4248
3.52k
  un = GMP_ABS (u->_mp_size);
4249
4250
3.52k
  if (un == 0)
4251
0
    {
4252
0
      sp[0] = '0';
4253
0
      sp[1] = '\0';
4254
0
      return sp;
4255
0
    }
4256
4257
3.52k
  i = 0;
4258
4259
3.52k
  if (u->_mp_size < 0)
4260
0
    sp[i++] = '-';
4261
4262
3.52k
  bits = mpn_base_power_of_two_p (base);
4263
4264
3.52k
  if (bits)
4265
    /* Not modified in this case. */
4266
0
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
4267
3.52k
  else
4268
3.52k
    {
4269
3.52k
      struct mpn_base_info info;
4270
3.52k
      mp_ptr tp;
4271
4272
3.52k
      mpn_get_base_info (&info, base);
4273
3.52k
      tp = gmp_xalloc_limbs (un);
4274
3.52k
      mpn_copyi (tp, u->_mp_d, un);
4275
4276
3.52k
      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
4277
3.52k
      gmp_free (tp);
4278
3.52k
    }
4279
4280
330k
  for (; i < sn; i++)
4281
326k
    sp[i] = digits[(unsigned char) sp[i]];
4282
4283
3.52k
  sp[sn] = '\0';
4284
3.52k
  return sp;
4285
3.52k
}
4286
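When sp is NULL, as handled above, mpz_get_str allocates the string internally with gmp_xalloc; with the default memory functions that is plain malloc, so free() is the matching release. A minimal usage sketch (example_print_hex is an illustrative name; puts and free come from headers this file already includes):

/* Sketch: let mpz_get_str allocate the buffer, print it, then release it. */
static void
example_print_hex (const mpz_t x)
{
  char *s = mpz_get_str (NULL, 16, x);  /* base 16 <= 36, so lowercase digits */
  if (s)
    {
      puts (s);
      free (s);  /* valid only with the default allocator (malloc) */
    }
}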
4287
int
4288
mpz_set_str (mpz_t r, const char *sp, int base)
4289
5.01k
{
4290
5.01k
  unsigned bits, value_of_a;
4291
5.01k
  mp_size_t rn, alloc;
4292
5.01k
  mp_ptr rp;
4293
5.01k
  size_t dn;
4294
5.01k
  int sign;
4295
5.01k
  unsigned char *dp;
4296
4297
5.01k
  assert (base == 0 || (base >= 2 && base <= 62));
4298
4299
5.01k
  while (isspace( (unsigned char) *sp))
4300
0
    sp++;
4301
4302
5.01k
  sign = (*sp == '-');
4303
5.01k
  sp += sign;
4304
4305
5.01k
  if (base == 0)
4306
5.01k
    {
4307
5.01k
      if (sp[0] == '0')
4308
286
  {
4309
286
    if (sp[1] == 'x' || sp[1] == 'X')
4310
0
      {
4311
0
        base = 16;
4312
0
        sp += 2;
4313
0
      }
4314
286
    else if (sp[1] == 'b' || sp[1] == 'B')
4315
0
      {
4316
0
        base = 2;
4317
0
        sp += 2;
4318
0
      }
4319
286
    else
4320
286
      base = 8;
4321
286
  }
4322
4.73k
      else
4323
4.73k
  base = 10;
4324
5.01k
    }
4325
4326
5.01k
  if (!*sp)
4327
0
    {
4328
0
      r->_mp_size = 0;
4329
0
      return -1;
4330
0
    }
4331
5.01k
  dp = (unsigned char *) gmp_xalloc (strlen (sp));
4332
4333
5.01k
  value_of_a = (base > 36) ? 36 : 10;
4334
415k
  for (dn = 0; *sp; sp++)
4335
410k
    {
4336
410k
      unsigned digit;
4337
4338
410k
      if (isspace ((unsigned char) *sp))
4339
0
  continue;
4340
410k
      else if (*sp >= '0' && *sp <= '9')
4341
410k
  digit = *sp - '0';
4342
0
      else if (*sp >= 'a' && *sp <= 'z')
4343
0
  digit = *sp - 'a' + value_of_a;
4344
0
      else if (*sp >= 'A' && *sp <= 'Z')
4345
0
  digit = *sp - 'A' + 10;
4346
0
      else
4347
0
  digit = base; /* fail */
4348
4349
410k
      if (digit >= (unsigned) base)
4350
0
  {
4351
0
    gmp_free (dp);
4352
0
    r->_mp_size = 0;
4353
0
    return -1;
4354
0
  }
4355
4356
410k
      dp[dn++] = digit;
4357
410k
    }
4358
4359
5.01k
  if (!dn)
4360
0
    {
4361
0
      gmp_free (dp);
4362
0
      r->_mp_size = 0;
4363
0
      return -1;
4364
0
    }
4365
5.01k
  bits = mpn_base_power_of_two_p (base);
4366
4367
5.01k
  if (bits > 0)
4368
286
    {
4369
286
      alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4370
286
      rp = MPZ_REALLOC (r, alloc);
4371
286
      rn = mpn_set_str_bits (rp, dp, dn, bits);
4372
286
    }
4373
4.73k
  else
4374
4.73k
    {
4375
4.73k
      struct mpn_base_info info;
4376
4.73k
      mpn_get_base_info (&info, base);
4377
4.73k
      alloc = (dn + info.exp - 1) / info.exp;
4378
4.73k
      rp = MPZ_REALLOC (r, alloc);
4379
4.73k
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
4380
      /* Normalization, needed for all-zero input. */
4381
4.73k
      assert (rn > 0);
4382
4.73k
      rn -= rp[rn-1] == 0;
4383
4.73k
    }
4384
5.01k
  assert (rn <= alloc);
4385
5.01k
  gmp_free (dp);
4386
4387
5.01k
  r->_mp_size = sign ? - rn : rn;
4388
4389
5.01k
  return 0;
4390
5.01k
}
4391
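As the code above shows, base 0 requests auto-detection ("0x"/"0X" selects 16, "0b"/"0B" selects 2, a bare leading "0" selects 8, anything else 10), and on any parse failure r is set to zero and -1 is returned. A minimal parsing sketch (example_parse is an illustrative name):

/* Sketch: parse s into an already initialized r, with base auto-detection. */
static int
example_parse (mpz_t r, const char *s)
{
  /* e.g. "0xdeadbeef" -> base 16, "0644" -> base 8, "-123" -> base 10 */
  return mpz_set_str (r, s, 0);   /* 0 on success, -1 on invalid input */
}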
4392
int
4393
mpz_init_set_str (mpz_t r, const char *sp, int base)
4394
3.10k
{
4395
3.10k
  mpz_init (r);
4396
3.10k
  return mpz_set_str (r, sp, base);
4397
3.10k
}
4398
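Note that mpz_init runs before the parse, so r is initialized even when mpz_set_str rejects the string; the caller still owns it and must clear it. A sketch (example_init_parse is an illustrative name):

/* Sketch: initialize-and-parse, clearing r if the string was invalid. */
static int
example_init_parse (mpz_t r, const char *s)
{
  if (mpz_init_set_str (r, s, 10) == 0)
    return 0;
  mpz_clear (r);   /* r was initialized (and reset to 0) despite the failure */
  return -1;
}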
4399
size_t
4400
mpz_out_str (FILE *stream, int base, const mpz_t x)
4401
0
{
4402
0
  char *str;
4403
0
  size_t len;
4404
4405
0
  str = mpz_get_str (NULL, base, x);
4406
0
  len = strlen (str);
4407
0
  len = fwrite (str, 1, len, stream);
4408
0
  gmp_free (str);
4409
0
  return len;
4410
0
}
4411
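mpz_out_str returns whatever fwrite reports, and since the string for any value (including zero) is at least one character, a return of zero signals a write error. A minimal sketch (example_dump is an illustrative name):

/* Sketch: print x in base 10 to a stream and detect write errors. */
static int
example_dump (FILE *stream, const mpz_t x)
{
  return mpz_out_str (stream, 10, x) > 0 ? 0 : -1;
}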
4412

4413
static int
4414
gmp_detect_endian (void)
4415
0
{
4416
0
  static const int i = 2;
4417
0
  const unsigned char *p = (const unsigned char *) &i;
4418
0
  return 1 - *p;
4419
0
}
4420
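The probe above stores 2 in a static int and inspects its first byte: a little-endian host stores the low-order byte (2) first, so the function returns 1 - 2 = -1, while a big-endian host stores 0 first and the result is 1. That matches the endian convention used by mpz_import/mpz_export below (1 = most significant byte first, -1 = least significant byte first), which is why endian == 0 can simply be replaced by the detected value. The same probe written as a standalone check (example_host_is_little_endian is an illustrative name):

/* Sketch: equivalent endianness probe outside mini-gmp. */
static int
example_host_is_little_endian (void)
{
  const unsigned int probe = 1;
  return *(const unsigned char *) &probe == 1;  /* low byte stored first? */
}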
4421
/* Import and export. Does not support nails. */
4422
void
4423
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
4424
      size_t nails, const void *src)
4425
0
{
4426
0
  const unsigned char *p;
4427
0
  ptrdiff_t word_step;
4428
0
  mp_ptr rp;
4429
0
  mp_size_t rn;
4430
4431
  /* The current (partial) limb. */
4432
0
  mp_limb_t limb;
4433
  /* The number of bytes already copied to this limb (starting from
4434
     the low end). */
4435
0
  size_t bytes;
4436
  /* The index where the limb should be stored, when completed. */
4437
0
  mp_size_t i;
4438
4439
0
  if (nails != 0)
4440
0
    gmp_die ("mpz_import: Nails not supported.");
4441
4442
0
  assert (order == 1 || order == -1);
4443
0
  assert (endian >= -1 && endian <= 1);
4444
4445
0
  if (endian == 0)
4446
0
    endian = gmp_detect_endian ();
4447
4448
0
  p = (unsigned char *) src;
4449
4450
0
  word_step = (order != endian) ? 2 * size : 0;
4451
4452
  /* Process bytes from the least significant end, so point p at the
4453
     least significant word. */
4454
0
  if (order == 1)
4455
0
    {
4456
0
      p += size * (count - 1);
4457
0
      word_step = - word_step;
4458
0
    }
4459
4460
  /* And at the least significant byte of that word. */
4461
0
  if (endian == 1)
4462
0
    p += (size - 1);
4463
4464
0
  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
4465
0
  rp = MPZ_REALLOC (r, rn);
4466
4467
0
  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
4468
0
    {
4469
0
      size_t j;
4470
0
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4471
0
  {
4472
0
    limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
4473
0
    if (bytes == sizeof(mp_limb_t))
4474
0
      {
4475
0
        rp[i++] = limb;
4476
0
        bytes = 0;
4477
0
        limb = 0;
4478
0
      }
4479
0
  }
4480
0
    }
4481
0
  assert (i + (bytes > 0) == rn);
4482
0
  if (limb != 0)
4483
0
    rp[i++] = limb;
4484
0
  else
4485
0
    i = mpn_normalized_size (rp, i);
4486
4487
0
  r->_mp_size = i;
4488
0
}
4489
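A typical use of the routine above is loading a big-endian octet string (a hash or key, say) as an unsigned integer: with size == 1 each word is a single byte, so the endian and nails parameters are effectively inert. A minimal sketch (example_import_be is an illustrative name):

/* Sketch: read len bytes, most significant byte first, into z. */
static void
example_import_be (mpz_t z, const unsigned char *buf, size_t len)
{
  /* count = len, order = 1 (MS word first), size = 1, endian = 1, nails = 0 */
  mpz_import (z, len, 1, 1, 1, 0, buf);
}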
4490
void *
4491
mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
4492
      size_t nails, const mpz_t u)
4493
0
{
4494
0
  size_t count;
4495
0
  mp_size_t un;
4496
4497
0
  if (nails != 0)
4498
0
    gmp_die ("mpz_import: Nails not supported.");
4499
4500
0
  assert (order == 1 || order == -1);
4501
0
  assert (endian >= -1 && endian <= 1);
4502
0
  assert (size > 0 || u->_mp_size == 0);
4503
4504
0
  un = u->_mp_size;
4505
0
  count = 0;
4506
0
  if (un != 0)
4507
0
    {
4508
0
      size_t k;
4509
0
      unsigned char *p;
4510
0
      ptrdiff_t word_step;
4511
      /* The current (partial) limb. */
4512
0
      mp_limb_t limb;
4513
      /* The number of bytes left to do in this limb. */
4514
0
      size_t bytes;
4515
      /* The index where the limb was read. */
4516
0
      mp_size_t i;
4517
4518
0
      un = GMP_ABS (un);
4519
4520
      /* Count bytes in top limb. */
4521
0
      limb = u->_mp_d[un-1];
4522
0
      assert (limb != 0);
4523
4524
0
      k = (GMP_LIMB_BITS <= CHAR_BIT);
4525
0
      if (!k)
4526
0
  {
4527
0
    do {
4528
0
      int LOCAL_CHAR_BIT = CHAR_BIT;
4529
0
      k++; limb >>= LOCAL_CHAR_BIT;
4530
0
    } while (limb != 0);
4531
0
  }
4532
      /* else limb = 0; */
4533
4534
0
      count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
4535
4536
0
      if (!r)
4537
0
  r = gmp_xalloc (count * size);
4538
4539
0
      if (endian == 0)
4540
0
  endian = gmp_detect_endian ();
4541
4542
0
      p = (unsigned char *) r;
4543
4544
0
      word_step = (order != endian) ? 2 * size : 0;
4545
4546
      /* Process bytes from the least significant end, so point p at the
4547
   least significant word. */
4548
0
      if (order == 1)
4549
0
  {
4550
0
    p += size * (count - 1);
4551
0
    word_step = - word_step;
4552
0
  }
4553
4554
      /* And at the least significant byte of that word. */
4555
0
      if (endian == 1)
4556
0
  p += (size - 1);
4557
4558
0
      for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4559
0
  {
4560
0
    size_t j;
4561
0
    for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian)
4562
0
      {
4563
0
        if (sizeof (mp_limb_t) == 1)
4564
0
    {
4565
0
      if (i < un)
4566
0
        *p = u->_mp_d[i++];
4567
0
      else
4568
0
        *p = 0;
4569
0
    }
4570
0
        else
4571
0
    {
4572
0
      int LOCAL_CHAR_BIT = CHAR_BIT;
4573
0
      if (bytes == 0)
4574
0
        {
4575
0
          if (i < un)
4576
0
      limb = u->_mp_d[i++];
4577
0
          bytes = sizeof (mp_limb_t);
4578
0
        }
4579
0
      *p = limb;
4580
0
      limb >>= LOCAL_CHAR_BIT;
4581
0
      bytes--;
4582
0
    }
4583
0
      }
4584
0
  }
4585
0
      assert (i == un);
4586
0
      assert (k == count);
4587
0
    }
4588
4589
0
  if (countp)
4590
0
    *countp = count;
4591
4592
0
  return r;
4593
0
}
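Passing r == NULL above makes mpz_export allocate the output itself (with the default memory functions, via malloc), and the sign of u is discarded. A round-trip sketch using single-byte, most-significant-first words (example_export_roundtrip is an illustrative name):

/* Sketch: export |z| as big-endian bytes and import it back. */
static int
example_export_roundtrip (const mpz_t z)
{
  size_t count;
  unsigned char *buf;
  mpz_t back;
  int equal;

  /* NULL output pointer: mpz_export allocates; count receives the word count.
     For z == 0 no words are produced and buf comes back NULL. */
  buf = (unsigned char *) mpz_export (NULL, &count, 1, 1, 1, 0, z);

  mpz_init (back);
  if (count > 0)
    mpz_import (back, count, 1, 1, 1, 0, buf);
  equal = (mpz_cmpabs (back, z) == 0);   /* compare magnitudes; the sign was dropped */
  mpz_clear (back);
  free (buf);                            /* valid only with the default allocator */
  return equal;
}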