Coverage Report

Created: 2023-03-26 06:57

/src/boost/boost/json/detail/ryu/impl/d2s.ipp
 Line |  Count | Source
------+--------+----------------------------------------------------------------
    1 |        | // Copyright 2018 Ulf Adams
    2 |        | //
    3 |        | // The contents of this file may be used under the terms of the Apache License,
    4 |        | // Version 2.0.
    5 |        | //
    6 |        | //    (See accompanying file LICENSE-Apache or copy at
    7 |        | //     http://www.apache.org/licenses/LICENSE-2.0)
    8 |        | //
    9 |        | // Alternatively, the contents of this file may be used under the terms of
   10 |        | // the Boost Software License, Version 1.0.
   11 |        | //    (See accompanying file LICENSE-Boost or copy at
   12 |        | //     https://www.boost.org/LICENSE_1_0.txt)
   13 |        | //
   14 |        | // Unless required by applicable law or agreed to in writing, this software
   15 |        | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   16 |        | // KIND, either express or implied.
   17 |        |
   18 |        | // Runtime compiler options:
   19 |        | // -DRYU_DEBUG Generate verbose debugging output to stdout.
   20 |        | //
   21 |        | // -DRYU_ONLY_64_BIT_OPS Avoid using uint128_t or 64-bit intrinsics. Slower,
   22 |        | //     depending on your compiler.
   23 |        | //
   24 |        | // -DRYU_OPTIMIZE_SIZE Use smaller lookup tables. Instead of storing every
   25 |        | //     required power of 5, only store every 26th entry, and compute
   26 |        | //     intermediate values with a multiplication. This reduces the lookup table
   27 |        | //     size by about 10x (only one case, and only double) at the cost of some
   28 |        | //     performance. Currently requires MSVC intrinsics.
   29 |        |
   30 |        | /*
   31 |        |     This is a derivative work
   32 |        | */
   33 |        |
   34 |        | #ifndef BOOST_JSON_DETAIL_RYU_IMPL_D2S_IPP
   35 |        | #define BOOST_JSON_DETAIL_RYU_IMPL_D2S_IPP
   36 |        |
   37 |        | #include <boost/json/detail/ryu/ryu.hpp>
   38 |        | #include <cstdlib>
   39 |        | #include <cstring>
   40 |        |
   41 |        | #ifdef RYU_DEBUG
   42 |        | #include <stdio.h>
   43 |        | #endif
   44 |        |
   45 |        | // ABSL avoids uint128_t on Win32 even if __SIZEOF_INT128__ is defined.
   46 |        | // Let's do the same for now.
   47 |        | #if defined(__SIZEOF_INT128__) && !defined(_MSC_VER) && !defined(RYU_ONLY_64_BIT_OPS)
   48 |        | #define BOOST_JSON_RYU_HAS_UINT128
   49 |        | #elif defined(_MSC_VER) && !defined(RYU_ONLY_64_BIT_OPS) && defined(_M_X64)
   50 |        | #define BOOST_JSON_RYU_HAS_64_BIT_INTRINSICS
   51 |        | #endif
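
Which of the three multiplication strategies further down gets compiled is decided entirely by the two macros above. A throwaway probe, shown only as an illustration (the function name is made up and is not part of the library; it assumes it is compiled somewhere after this header):

#include <cstdio>

// Diagnostic sketch: report which strategy the detection above selected in
// this translation unit.
inline void report_ryu_mul_path()
{
#if defined(BOOST_JSON_RYU_HAS_UINT128)
    std::puts("64x128-bit multiply via unsigned __int128");
#elif defined(BOOST_JSON_RYU_HAS_64_BIT_INTRINSICS)
    std::puts("64x128-bit multiply via MSVC 64-bit intrinsics");
#else
    std::puts("portable 64-bit fallback");
#endif
}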
   52 |        |
   53 |        | #include <boost/json/detail/ryu/detail/common.hpp>
   54 |        | #include <boost/json/detail/ryu/detail/digit_table.hpp>
   55 |        | #include <boost/json/detail/ryu/detail/d2s.hpp>
   56 |        | #include <boost/json/detail/ryu/detail/d2s_intrinsics.hpp>
   57 |        |
   58 |        | namespace boost {
   59 |        | namespace json {
   60 |        | namespace detail {
   61 |        |
   62 |        | namespace ryu {
   63 |        | namespace detail {
   64 |        |
   65 |        | // We need a 64x128-bit multiplication and a subsequent 128-bit shift.
   66 |        | // Multiplication:
   67 |        | //   The 64-bit factor is variable and passed in, the 128-bit factor comes
   68 |        | //   from a lookup table. We know that the 64-bit factor only has 55
   69 |        | //   significant bits (i.e., the 9 topmost bits are zeros). The 128-bit
   70 |        | //   factor only has 124 significant bits (i.e., the 4 topmost bits are
   71 |        | //   zeros).
   72 |        | // Shift:
   73 |        | //   In principle, the multiplication result requires 55 + 124 = 179 bits to
   74 |        | //   represent. However, we then shift this value to the right by j, which is
   75 |        | //   at least j >= 115, so the result is guaranteed to fit into 179 - 115 = 64
   76 |        | //   bits. This means that we only need the topmost 64 significant bits of
   77 |        | //   the 64x128-bit multiplication.
   78 |        | //
   79 |        | // There are several ways to do this:
   80 |        | // 1. Best case: the compiler exposes a 128-bit type.
   81 |        | //    We perform two 64x64-bit multiplications, add the higher 64 bits of the
   82 |        | //    lower result to the higher result, and shift by j - 64 bits.
   83 |        | //
   84 |        | //    We explicitly cast from 64-bit to 128-bit, so the compiler can tell
   85 |        | //    that these are only 64-bit inputs, and can map these to the best
   86 |        | //    possible sequence of assembly instructions.
   87 |        | //    x64 machines happen to have matching assembly instructions for
   88 |        | //    64x64-bit multiplications and 128-bit shifts.
   89 |        | //
   90 |        | // 2. Second best case: the compiler exposes intrinsics for the x64 assembly
   91 |        | //    instructions mentioned in 1.
   92 |        | //
   93 |        | // 3. We only have 64x64 bit instructions that return the lower 64 bits of
   94 |        | //    the result, i.e., we have to use plain C.
   95 |        | //    Our inputs are less than the full width, so we have three options:
   96 |        | //    a. Ignore this fact and just implement the intrinsics manually.
   97 |        | //    b. Split both into 31-bit pieces, which guarantees no internal overflow,
   98 |        | //       but requires extra work upfront (unless we change the lookup table).
   99 |        | //    c. Split only the first factor into 31-bit pieces, which also guarantees
  100 |        | //       no internal overflow, but requires extra work since the intermediate
  101 |        | //       results are not perfectly aligned.
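
The width argument above boils down to two small arithmetic facts; restated as compile-time checks purely for emphasis (not part of the original file):

// A 55-bit factor times a 124-bit factor needs at most 179 bits, and a right
// shift by at least 115 leaves at most 64 significant bits.
static_assert(55 + 124 == 179, "maximum width of the full product");
static_assert(179 - 115 == 64, "shifted result fits in a 64-bit integer");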
  102 |        | #if defined(BOOST_JSON_RYU_HAS_UINT128)
  103 |        |
  104 |        | // Best case: use 128-bit type.
  105 |        | inline
  106 |        | std::uint64_t
  107 |        |     mulShift(
  108 |        |     const std::uint64_t m,
  109 |        |     const std::uint64_t* const mul,
  110 |        |     const std::int32_t j) noexcept
  111 |  14.5M | {
  112 |  14.5M |     const uint128_t b0 = ((uint128_t) m) * mul[0];
  113 |  14.5M |     const uint128_t b2 = ((uint128_t) m) * mul[1];
  114 |  14.5M |     return (std::uint64_t) (((b0 >> 64) + b2) >> (j - 64));
  115 |  14.5M | }
  116 |        |
  117 |        | inline
  118 |        | uint64_t
  119 |        | mulShiftAll(
  120 |        |     const std::uint64_t m,
  121 |        |     const std::uint64_t* const mul,
  122 |        |     std::int32_t const j,
  123 |        |     std::uint64_t* const vp,
  124 |        |     std::uint64_t* const vm,
  125 |        |     const std::uint32_t mmShift) noexcept
  126 |  4.83M | {
  127 |        | //  m <<= 2;
  128 |        | //  uint128_t b0 = ((uint128_t) m) * mul[0]; // 0
  129 |        | //  uint128_t b2 = ((uint128_t) m) * mul[1]; // 64
  130 |        | //
  131 |        | //  uint128_t hi = (b0 >> 64) + b2;
  132 |        | //  uint128_t lo = b0 & 0xffffffffffffffffull;
  133 |        | //  uint128_t factor = (((uint128_t) mul[1]) << 64) + mul[0];
  134 |        | //  uint128_t vpLo = lo + (factor << 1);
  135 |        | //  *vp = (std::uint64_t) ((hi + (vpLo >> 64)) >> (j - 64));
  136 |        | //  uint128_t vmLo = lo - (factor << mmShift);
  137 |        | //  *vm = (std::uint64_t) ((hi + (vmLo >> 64) - (((uint128_t) 1ull) << 64)) >> (j - 64));
  138 |        | //  return (std::uint64_t) (hi >> (j - 64));
  139 |  4.83M |     *vp = mulShift(4 * m + 2, mul, j);
  140 |  4.83M |     *vm = mulShift(4 * m - 1 - mmShift, mul, j);
  141 |  4.83M |     return mulShift(4 * m, mul, j);
  142 |  4.83M | }
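
For intuition: with F = mul[1] * 2^64 + mul[0], the two-multiplication form used by mulShift above computes exactly (m * F) >> j for any j >= 64; discarding b0's low 64 bits before the final shift cannot lose a carry. A self-contained check, assuming a GCC/Clang-style unsigned __int128 and using made-up inputs kept small enough that the full product still fits in 128 bits (illustration only, not part of the file):

#include <cassert>
#include <cstdint>

int main()
{
    using u128 = unsigned __int128;

    // Made-up inputs, chosen so that m * F still fits in 128 bits.
    std::uint64_t const m  = 0x001FFFFFFFFFFFFFull; // 53 significant bits
    std::uint64_t const lo = 0x89705F4136B4A597ull; // low 64 bits of the factor F
    std::uint64_t const hi = 0x0000000000000007ull; // high 64 bits of F, kept small on purpose
    std::int32_t const j   = 70;                    // any shift >= 64 works for the identity

    // mulShift-style decomposition: two 64x64-bit products, carry the high half.
    u128 const b0 = (u128)m * lo;
    u128 const b2 = (u128)m * hi;
    std::uint64_t const decomposed = (std::uint64_t)(((b0 >> 64) + b2) >> (j - 64));

    // Direct computation of (m * F) >> j, possible here only because F is small.
    u128 const F = ((u128)hi << 64) | lo;
    std::uint64_t const direct = (std::uint64_t)(((u128)m * F) >> j);

    assert(decomposed == direct);
    return 0;
}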
  143 |        |
  144 |        | #elif defined(BOOST_JSON_RYU_HAS_64_BIT_INTRINSICS)
  145 |        |
  146 |        | inline
  147 |        | std::uint64_t
  148 |        | mulShift(
  149 |        |     const std::uint64_t m,
  150 |        |     const std::uint64_t* const mul,
  151 |        |     const std::int32_t j) noexcept
  152 |        | {
  153 |        |     // m is maximum 55 bits
  154 |        |     std::uint64_t high1;                                   // 128
  155 |        |     std::uint64_t const low1 = umul128(m, mul[1], &high1); // 64
  156 |        |     std::uint64_t high0;                                   // 64
  157 |        |     umul128(m, mul[0], &high0);                            // 0
  158 |        |     std::uint64_t const sum = high0 + low1;
  159 |        |     if (sum < high0)
  160 |        |         ++high1; // overflow into high1
  161 |        |     return shiftright128(sum, high1, j - 64);
  162 |        | }
  163 |        |
  164 |        | inline
  165 |        | std::uint64_t
  166 |        | mulShiftAll(
  167 |        |     const std::uint64_t m,
  168 |        |     const std::uint64_t* const mul,
  169 |        |     const std::int32_t j,
  170 |        |     std::uint64_t* const vp,
  171 |        |     std::uint64_t* const vm,
  172 |        |     const std::uint32_t mmShift) noexcept
  173 |        | {
  174 |        |     *vp = mulShift(4 * m + 2, mul, j);
  175 |        |     *vm = mulShift(4 * m - 1 - mmShift, mul, j);
  176 |        |     return mulShift(4 * m, mul, j);
  177 |        | }
  178 |        |
  179 |        | #else // !defined(BOOST_JSON_RYU_HAS_UINT128) && !defined(BOOST_JSON_RYU_HAS_64_BIT_INTRINSICS)
  180 |        |
  181 |        | inline
  182 |        | std::uint64_t
  183 |        | mulShiftAll(
  184 |        |     std::uint64_t m,
  185 |        |     const std::uint64_t* const mul,
  186 |        |     const std::int32_t j,
  187 |        |     std::uint64_t* const vp,
  188 |        |     std::uint64_t* const vm,
  189 |        |     const std::uint32_t mmShift)
  190 |        | {
  191 |        |     m <<= 1;
  192 |        |     // m is maximum 55 bits
  193 |        |     std::uint64_t tmp;
  194 |        |     std::uint64_t const lo = umul128(m, mul[0], &tmp);
  195 |        |     std::uint64_t hi;
  196 |        |     std::uint64_t const mid = tmp + umul128(m, mul[1], &hi);
  197 |        |     hi += mid < tmp; // overflow into hi
  198 |        |
  199 |        |     const std::uint64_t lo2 = lo + mul[0];
  200 |        |     const std::uint64_t mid2 = mid + mul[1] + (lo2 < lo);
  201 |        |     const std::uint64_t hi2 = hi + (mid2 < mid);
  202 |        |     *vp = shiftright128(mid2, hi2, (std::uint32_t)(j - 64 - 1));
  203 |        |
  204 |        |     if (mmShift == 1)
  205 |        |     {
  206 |        |         const std::uint64_t lo3 = lo - mul[0];
  207 |        |         const std::uint64_t mid3 = mid - mul[1] - (lo3 > lo);
  208 |        |         const std::uint64_t hi3 = hi - (mid3 > mid);
  209 |        |         *vm = shiftright128(mid3, hi3, (std::uint32_t)(j - 64 - 1));
  210 |        |     }
  211 |        |     else
  212 |        |     {
  213 |        |         const std::uint64_t lo3 = lo + lo;
  214 |        |         const std::uint64_t mid3 = mid + mid + (lo3 < lo);
  215 |        |         const std::uint64_t hi3 = hi + hi + (mid3 < mid);
  216 |        |         const std::uint64_t lo4 = lo3 - mul[0];
  217 |        |         const std::uint64_t mid4 = mid3 - mul[1] - (lo4 > lo3);
  218 |        |         const std::uint64_t hi4 = hi3 - (mid4 > mid3);
  219 |        |         *vm = shiftright128(mid4, hi4, (std::uint32_t)(j - 64));
  220 |        |     }
  221 |        |
  222 |        |     return shiftright128(mid, hi, (std::uint32_t)(j - 64 - 1));
  223 |        | }
  224 |        |
  225 |        | #endif // BOOST_JSON_RYU_HAS_64_BIT_INTRINSICS
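
The fallback branch above relies on umul128 and shiftright128 helpers from d2s_intrinsics.hpp. As an illustration of what such a helper computes (hypothetical name, not the library's actual implementation), a full 64x64 -> 128-bit product can be assembled from 32-bit halves:

#include <cstdint>

// Sketch: full 64x64 -> 128-bit multiply built from 32-bit halves.
// Returns the low 64 bits; *productHi receives the high 64 bits.
// Hypothetical stand-in for the library's umul128 (or _umul128 on MSVC x64).
inline std::uint64_t
umul128_portable(std::uint64_t a, std::uint64_t b, std::uint64_t* productHi)
{
    std::uint64_t const aLo = a & 0xFFFFFFFFu;
    std::uint64_t const aHi = a >> 32;
    std::uint64_t const bLo = b & 0xFFFFFFFFu;
    std::uint64_t const bHi = b >> 32;

    std::uint64_t const b00 = aLo * bLo;
    std::uint64_t const b01 = aLo * bHi;
    std::uint64_t const b10 = aHi * bLo;
    std::uint64_t const b11 = aHi * bHi;

    std::uint64_t const mid1 = b10 + (b00 >> 32);          // cannot overflow
    std::uint64_t const mid2 = b01 + (mid1 & 0xFFFFFFFFu); // cannot overflow

    *productHi = b11 + (mid1 >> 32) + (mid2 >> 32);
    return (b00 & 0xFFFFFFFFu) | (mid2 << 32);
}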
  226 |        |
  227 |        | inline
  228 |        | std::uint32_t
  229 |        | decimalLength17(
  230 |        |     const std::uint64_t v)
  231 |  4.87M | {
  232 |        |     // This is slightly faster than a loop.
  233 |        |     // The average output length is 16.38 digits, so we check high-to-low.
  234 |        |     // Function precondition: v is not an 18, 19, or 20-digit number.
  235 |        |     // (17 digits are sufficient for round-tripping.)
  236 |  4.87M |     BOOST_ASSERT(v < 100000000000000000L);
  237 |  4.87M |     if (v >= 10000000000000000L) { return 17; }
  238 |   163k |     if (v >= 1000000000000000L) { return 16; }
  239 |   118k |     if (v >= 100000000000000L) { return 15; }
  240 |   118k |     if (v >= 10000000000000L) { return 14; }
  241 |   117k |     if (v >= 1000000000000L) { return 13; }
  242 |   115k |     if (v >= 100000000000L) { return 12; }
  243 |   114k |     if (v >= 10000000000L) { return 11; }
  244 |   114k |     if (v >= 1000000000L) { return 10; }
  245 |   113k |     if (v >= 100000000L) { return 9; }
  246 |   113k |     if (v >= 10000000L) { return 8; }
  247 |   112k |     if (v >= 1000000L) { return 7; }
  248 |   111k |     if (v >= 100000L) { return 6; }
  249 |   110k |     if (v >= 10000L) { return 5; }
  250 |   108k |     if (v >= 1000L) { return 4; }
  251 |   106k |     if (v >= 100L) { return 3; }
  252 |   103k |     if (v >= 10L) { return 2; }
  253 |  87.9k |     return 1;
  254 |   103k | }
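
The ladder of comparisons above is just an unrolled digit count, tested highest-first because nearly all calls produce 17 digits (compare the counts: 4.87M calls, only 163k fall through the first test). A standalone reference of the behaviour it replaces (local helper, not the library's):

#include <cassert>
#include <cstdint>

// Reference digit counter: the loop the unrolled thresholds above replace.
inline std::uint32_t decimal_length_by_loop(std::uint64_t v)
{
    std::uint32_t len = 1;
    while (v >= 10) { v /= 10; ++len; }
    return len;
}

int main()
{
    assert(decimal_length_by_loop(1) == 1);
    assert(decimal_length_by_loop(9) == 1);
    assert(decimal_length_by_loop(10) == 2);
    assert(decimal_length_by_loop(99999999999999999ull) == 17); // largest valid input
    return 0;
}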
  255 |        |
  256 |        | // A floating decimal representing m * 10^e.
  257 |        | struct floating_decimal_64
  258 |        | {
  259 |        |     std::uint64_t mantissa;
  260 |        |     // Decimal exponent's range is -324 to 308
  261 |        |     // inclusive, and can fit in a short if needed.
  262 |        |     std::int32_t exponent;
  263 |        | };
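
Two concrete values of this representation, as d2d below produces them (stated for orientation only):

//   1.5     ->  { mantissa = 15, exponent = -1 }    // i.e. 15 * 10^-1
//   5e-324  ->  { mantissa = 5,  exponent = -324 }  // smallest subnormal double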
  264 |        |
  265 |        | inline
  266 |        | floating_decimal_64
  267 |        | d2d(
  268 |        |     const std::uint64_t ieeeMantissa,
  269 |        |     const std::uint32_t ieeeExponent)
  270 |  4.83M | {
  271 |  4.83M |     std::int32_t e2;
  272 |  4.83M |     std::uint64_t m2;
  273 |  4.83M |     if (ieeeExponent == 0)
  274 |    307 |     {
  275 |        |         // We subtract 2 so that the bounds computation has 2 additional bits.
  276 |    307 |         e2 = 1 - DOUBLE_BIAS - DOUBLE_MANTISSA_BITS - 2;
  277 |    307 |         m2 = ieeeMantissa;
  278 |    307 |     }
  279 |  4.83M |     else
  280 |  4.83M |     {
  281 |  4.83M |         e2 = (std::int32_t)ieeeExponent - DOUBLE_BIAS - DOUBLE_MANTISSA_BITS - 2;
  282 |  4.83M |         m2 = (1ull << DOUBLE_MANTISSA_BITS) | ieeeMantissa;
  283 |  4.83M |     }
  284 |  4.83M |     const bool even = (m2 & 1) == 0;
  285 |  4.83M |     const bool acceptBounds = even;
  286 |        |
  287 |        | #ifdef RYU_DEBUG
  288 |        |     printf("-> %" PRIu64 " * 2^%d\n", m2, e2 + 2);
  289 |        | #endif
  290 |        |
  291 |        |     // Step 2: Determine the interval of valid decimal representations.
  292 |  4.83M |     const std::uint64_t mv = 4 * m2;
  293 |        |     // Implicit bool -> int conversion. True is 1, false is 0.
  294 |  4.83M |     const std::uint32_t mmShift = ieeeMantissa != 0 || ieeeExponent <= 1;
  295 |        |     // We would compute mp and mm like this:
  296 |        |     // uint64_t mp = 4 * m2 + 2;
  297 |        |     // uint64_t mm = mv - 1 - mmShift;
  298 |        |
  299 |        |     // Step 3: Convert to a decimal power base using 128-bit arithmetic.
  300 |  4.83M |     std::uint64_t vr, vp, vm;
  301 |  4.83M |     std::int32_t e10;
  302 |  4.83M |     bool vmIsTrailingZeros = false;
  303 |  4.83M |     bool vrIsTrailingZeros = false;
  304 |  4.83M |     if (e2 >= 0) {
  305 |        |         // I tried special-casing q == 0, but there was no effect on performance.
  306 |        |         // This expression is slightly faster than max(0, log10Pow2(e2) - 1).
  307 |  4.76M |         const std::uint32_t q = log10Pow2(e2) - (e2 > 3);
  308 |  4.76M |         e10 = (std::int32_t)q;
  309 |  4.76M |         const std::int32_t k = DOUBLE_POW5_INV_BITCOUNT + pow5bits((int32_t)q) - 1;
  310 |  4.76M |         const std::int32_t i = -e2 + (std::int32_t)q + k;
  311 |        | #if defined(BOOST_JSON_RYU_OPTIMIZE_SIZE)
  312 |        |         uint64_t pow5[2];
  313 |        |         double_computeInvPow5(q, pow5);
  314 |        |         vr = mulShiftAll(m2, pow5, i, &vp, &vm, mmShift);
  315 |        | #else
  316 |  4.76M |         vr = mulShiftAll(m2, DOUBLE_POW5_INV_SPLIT()[q], i, &vp, &vm, mmShift);
  317 |  4.76M | #endif
  318 |        | #ifdef RYU_DEBUG
  319 |        |         printf("%" PRIu64 " * 2^%d / 10^%u\n", mv, e2, q);
  320 |        |         printf("V+=%" PRIu64 "\nV =%" PRIu64 "\nV-=%" PRIu64 "\n", vp, vr, vm);
  321 |        | #endif
  322 |  4.76M |         if (q <= 21)
  323 |  21.8k |         {
  324 |        |             // This should use q <= 22, but I think 21 is also safe. Smaller values
  325 |        |             // may still be safe, but it's more difficult to reason about them.
  326 |        |             // Only one of mp, mv, and mm can be a multiple of 5, if any.
  327 |  21.8k |             const std::uint32_t mvMod5 = ((std::uint32_t)mv) - 5 * ((std::uint32_t)div5(mv));
  328 |  21.8k |             if (mvMod5 == 0)
  329 |  4.50k |             {
  330 |  4.50k |                 vrIsTrailingZeros = multipleOfPowerOf5(mv, q);
  331 |  4.50k |             }
  332 |  17.3k |             else if (acceptBounds)
  333 |  12.9k |             {
  334 |        |                 // Same as min(e2 + (~mm & 1), pow5Factor(mm)) >= q
  335 |        |                 // <=> e2 + (~mm & 1) >= q && pow5Factor(mm) >= q
  336 |        |                 // <=> true && pow5Factor(mm) >= q, since e2 >= q.
  337 |  12.9k |                 vmIsTrailingZeros = multipleOfPowerOf5(mv - 1 - mmShift, q);
  338 |  12.9k |             }
  339 |  4.46k |             else
  340 |  4.46k |             {
  341 |        |                 // Same as min(e2 + 1, pow5Factor(mp)) >= q.
  342 |  4.46k |                 vp -= multipleOfPowerOf5(mv + 2, q);
  343 |  4.46k |             }
  344 |  21.8k |         }
  345 |  4.76M |     }
  346 |  68.6k |     else
  347 |  68.6k |     {
  348 |        |         // This expression is slightly faster than max(0, log10Pow5(-e2) - 1).
  349 |  68.6k |         const std::uint32_t q = log10Pow5(-e2) - (-e2 > 1);
  350 |  68.6k |         e10 = (std::int32_t)q + e2;
  351 |  68.6k |         const std::int32_t i = -e2 - (std::int32_t)q;
  352 |  68.6k |         const std::int32_t k = pow5bits(i) - DOUBLE_POW5_BITCOUNT;
  353 |  68.6k |         const std::int32_t j = (std::int32_t)q - k;
  354 |        | #if defined(BOOST_JSON_RYU_OPTIMIZE_SIZE)
  355 |        |         std::uint64_t pow5[2];
  356 |        |         double_computePow5(i, pow5);
  357 |        |         vr = mulShiftAll(m2, pow5, j, &vp, &vm, mmShift);
  358 |        | #else
  359 |  68.6k |         vr = mulShiftAll(m2, DOUBLE_POW5_SPLIT()[i], j, &vp, &vm, mmShift);
  360 |  68.6k | #endif
  361 |        | #ifdef RYU_DEBUG
  362 |        |         printf("%" PRIu64 " * 5^%d / 10^%u\n", mv, -e2, q);
  363 |        |         printf("%u %d %d %d\n", q, i, k, j);
  364 |        |         printf("V+=%" PRIu64 "\nV =%" PRIu64 "\nV-=%" PRIu64 "\n", vp, vr, vm);
  365 |        | #endif
  366 |  68.6k |         if (q <= 1)
  367 |  2.77k |         {
  368 |        |             // {vr,vp,vm} is trailing zeros if {mv,mp,mm} has at least q trailing 0 bits.
  369 |        |             // mv = 4 * m2, so it always has at least two trailing 0 bits.
  370 |  2.77k |             vrIsTrailingZeros = true;
  371 |  2.77k |             if (acceptBounds)
  372 |  1.97k |             {
  373 |        |                 // mm = mv - 1 - mmShift, so it has 1 trailing 0 bit iff mmShift == 1.
  374 |  1.97k |                 vmIsTrailingZeros = mmShift == 1;
  375 |  1.97k |             }
  376 |    805 |             else
  377 |    805 |             {
  378 |        |                 // mp = mv + 2, so it always has at least one trailing 0 bit.
  379 |    805 |                 --vp;
  380 |    805 |             }
  381 |  2.77k |         }
  382 |  65.8k |         else if (q < 63)
  383 |  9.65k |         {
  384 |        |             // TODO(ulfjack): Use a tighter bound here.
  385 |        |             // We want to know if the full product has at least q trailing zeros.
  386 |        |             // We need to compute min(p2(mv), p5(mv) - e2) >= q
  387 |        |             // <=> p2(mv) >= q && p5(mv) - e2 >= q
  388 |        |             // <=> p2(mv) >= q (because -e2 >= q)
  389 |  9.65k |             vrIsTrailingZeros = multipleOfPowerOf2(mv, q);
  390 |        | #ifdef RYU_DEBUG
  391 |        |             printf("vr is trailing zeros=%s\n", vrIsTrailingZeros ? "true" : "false");
  392 |        | #endif
  393 |  9.65k |         }
  394 |  68.6k |     }
  395 |        | #ifdef RYU_DEBUG
  396 |        |     printf("e10=%d\n", e10);
  397 |        |     printf("V+=%" PRIu64 "\nV =%" PRIu64 "\nV-=%" PRIu64 "\n", vp, vr, vm);
  398 |        |     printf("vm is trailing zeros=%s\n", vmIsTrailingZeros ? "true" : "false");
  399 |        |     printf("vr is trailing zeros=%s\n", vrIsTrailingZeros ? "true" : "false");
  400 |        | #endif
  401 |        |
  402 |        |     // Step 4: Find the shortest decimal representation in the interval of valid representations.
  403 |  4.83M |     std::int32_t removed = 0;
  404 |  4.83M |     std::uint8_t lastRemovedDigit = 0;
  405 |  4.83M |     std::uint64_t output;
  406 |        |     // On average, we remove ~2 digits.
  407 |  4.83M |     if (vmIsTrailingZeros || vrIsTrailingZeros)
  408 |  5.54k |     {
  409 |        |         // General case, which happens rarely (~0.7%).
  410 |  5.54k |         for (;;)
  411 |  46.1k |         {
  412 |  46.1k |             const std::uint64_t vpDiv10 = div10(vp);
  413 |  46.1k |             const std::uint64_t vmDiv10 = div10(vm);
  414 |  46.1k |             if (vpDiv10 <= vmDiv10)
  415 |  5.54k |                 break;
  416 |  40.5k |             const std::uint32_t vmMod10 = ((std::uint32_t)vm) - 10 * ((std::uint32_t)vmDiv10);
  417 |  40.5k |             const std::uint64_t vrDiv10 = div10(vr);
  418 |  40.5k |             const std::uint32_t vrMod10 = ((std::uint32_t)vr) - 10 * ((std::uint32_t)vrDiv10);
  419 |  40.5k |             vmIsTrailingZeros &= vmMod10 == 0;
  420 |  40.5k |             vrIsTrailingZeros &= lastRemovedDigit == 0;
  421 |  40.5k |             lastRemovedDigit = (uint8_t)vrMod10;
  422 |  40.5k |             vr = vrDiv10;
  423 |  40.5k |             vp = vpDiv10;
  424 |  40.5k |             vm = vmDiv10;
  425 |  40.5k |             ++removed;
  426 |  40.5k |         }
  427 |        | #ifdef RYU_DEBUG
  428 |        |         printf("V+=%" PRIu64 "\nV =%" PRIu64 "\nV-=%" PRIu64 "\n", vp, vr, vm);
  429 |        |         printf("d-10=%s\n", vmIsTrailingZeros ? "true" : "false");
  430 |        | #endif
  431 |  5.54k |         if (vmIsTrailingZeros)
  432 |    271 |         {
  433 |    271 |             for (;;)
  434 |  3.82k |             {
  435 |  3.82k |                 const std::uint64_t vmDiv10 = div10(vm);
  436 |  3.82k |                 const std::uint32_t vmMod10 = ((std::uint32_t)vm) - 10 * ((std::uint32_t)vmDiv10);
  437 |  3.82k |                 if (vmMod10 != 0)
  438 |    271 |                     break;
  439 |  3.55k |                 const std::uint64_t vpDiv10 = div10(vp);
  440 |  3.55k |                 const std::uint64_t vrDiv10 = div10(vr);
  441 |  3.55k |                 const std::uint32_t vrMod10 = ((std::uint32_t)vr) - 10 * ((std::uint32_t)vrDiv10);
  442 |  3.55k |                 vrIsTrailingZeros &= lastRemovedDigit == 0;
  443 |  3.55k |                 lastRemovedDigit = (uint8_t)vrMod10;
  444 |  3.55k |                 vr = vrDiv10;
  445 |  3.55k |                 vp = vpDiv10;
  446 |  3.55k |                 vm = vmDiv10;
  447 |  3.55k |                 ++removed;
  448 |  3.55k |             }
  449 |    271 |         }
  450 |        | #ifdef RYU_DEBUG
  451 |        |         printf("%" PRIu64 " %d\n", vr, lastRemovedDigit);
  452 |        |         printf("vr is trailing zeros=%s\n", vrIsTrailingZeros ? "true" : "false");
  453 |        | #endif
  454 |  5.54k |         if (vrIsTrailingZeros && lastRemovedDigit == 5 && vr % 2 == 0)
  455 |    815 |         {
  456 |        |             // Round even if the exact number is .....50..0.
  457 |    815 |             lastRemovedDigit = 4;
  458 |    815 |         }
  459 |        |         // We need to take vr + 1 if vr is outside bounds or we need to round up.
  460 |  5.54k |         output = vr + ((vr == vm && (!acceptBounds || !vmIsTrailingZeros)) || lastRemovedDigit >= 5);
  461 |  5.54k |     }
  462 |  4.82M |     else
  463 |  4.82M |     {
  464 |        |         // Specialized for the common case (~99.3%). Percentages below are relative to this.
  465 |  4.82M |         bool roundUp = false;
  466 |  4.82M |         const std::uint64_t vpDiv100 = div100(vp);
  467 |  4.82M |         const std::uint64_t vmDiv100 = div100(vm);
  468 |  4.82M |         if (vpDiv100 > vmDiv100)
  469 |  4.75M |         {
  470 |        |             // Optimization: remove two digits at a time (~86.2%).
  471 |  4.75M |             const std::uint64_t vrDiv100 = div100(vr);
  472 |  4.75M |             const std::uint32_t vrMod100 = ((std::uint32_t)vr) - 100 * ((std::uint32_t)vrDiv100);
  473 |  4.75M |             roundUp = vrMod100 >= 50;
  474 |  4.75M |             vr = vrDiv100;
  475 |  4.75M |             vp = vpDiv100;
  476 |  4.75M |             vm = vmDiv100;
  477 |  4.75M |             removed += 2;
  478 |  4.75M |         }
  479 |        |         // Loop iterations below (approximately), without optimization above:
  480 |        |         // 0: 0.03%, 1: 13.8%, 2: 70.6%, 3: 14.0%, 4: 1.40%, 5: 0.14%, 6+: 0.02%
  481 |        |         // Loop iterations below (approximately), with optimization above:
  482 |        |         // 0: 70.6%, 1: 27.8%, 2: 1.40%, 3: 0.14%, 4+: 0.02%
  483 |  4.82M |         for (;;)
  484 |  5.93M |         {
  485 |  5.93M |             const std::uint64_t vpDiv10 = div10(vp);
  486 |  5.93M |             const std::uint64_t vmDiv10 = div10(vm);
  487 |  5.93M |             if (vpDiv10 <= vmDiv10)
  488 |  4.82M |                 break;
  489 |  1.11M |             const std::uint64_t vrDiv10 = div10(vr);
  490 |  1.11M |             const std::uint32_t vrMod10 = ((std::uint32_t)vr) - 10 * ((std::uint32_t)vrDiv10);
  491 |  1.11M |             roundUp = vrMod10 >= 5;
  492 |  1.11M |             vr = vrDiv10;
  493 |  1.11M |             vp = vpDiv10;
  494 |  1.11M |             vm = vmDiv10;
  495 |  1.11M |             ++removed;
  496 |  1.11M |         }
  497 |        | #ifdef RYU_DEBUG
  498 |        |         printf("%" PRIu64 " roundUp=%s\n", vr, roundUp ? "true" : "false");
  499 |        |         printf("vr is trailing zeros=%s\n", vrIsTrailingZeros ? "true" : "false");
  500 |        | #endif
  501 |        |         // We need to take vr + 1 if vr is outside bounds or we need to round up.
  502 |  4.82M |         output = vr + (vr == vm || roundUp);
  503 |  4.82M |     }
  504 |  4.83M |     const std::int32_t exp = e10 + removed;
  505 |        |
  506 |        | #ifdef RYU_DEBUG
  507 |        |     printf("V+=%" PRIu64 "\nV =%" PRIu64 "\nV-=%" PRIu64 "\n", vp, vr, vm);
  508 |        |     printf("O=%" PRIu64 "\n", output);
  509 |        |     printf("EXP=%d\n", exp);
  510 |        | #endif
  511 |        |
  512 |  4.83M |     floating_decimal_64 fd;
  513 |  4.83M |     fd.exponent = exp;
  514 |  4.83M |     fd.mantissa = output;
  515 |  4.83M |     return fd;
  516 |  4.83M | }
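
The digit-removal loops in Step 4 above shorten the scaled triple (vm, vr, vp) for as long as the whole interval still agrees after dropping a digit. The same idea in isolation, with made-up integers standing in for the scaled values (illustration only; the real loops also track the dropped digit for rounding):

#include <cassert>
#include <cstdint>

int main()
{
    // Pretend scaled bounds: vm < vr < vp, the interval of acceptable outputs.
    std::uint64_t vm = 12249999999999999ull;
    std::uint64_t vr = 12250000000000000ull;
    std::uint64_t vp = 12250000000000001ull;
    std::int32_t removed = 0;

    while (vp / 10 > vm / 10) // same stopping condition as the loops above
    {
        vm /= 10;
        vr /= 10;
        vp /= 10;
        ++removed;
    }

    // Every digit dropped was redundant: 1225 * 10^13 still lies inside the
    // original interval, so the shorter form identifies the same value.
    assert(vr == 1225);
    assert(removed == 13);
    return 0;
}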
  517 |        |
  518 |        | inline
  519 |        | int
  520 |        | to_chars(
  521 |        |     const floating_decimal_64 v,
  522 |        |     const bool sign,
  523 |        |     char* const result)
  524 |  4.87M | {
  525 |        |     // Step 5: Print the decimal representation.
  526 |  4.87M |     int index = 0;
  527 |  4.87M |     if (sign)
  528 |  4.84k |         result[index++] = '-';
  529 |        |
  530 |  4.87M |     std::uint64_t output = v.mantissa;
  531 |  4.87M |     std::uint32_t const olength = decimalLength17(output);
  532 |        |
  533 |        | #ifdef RYU_DEBUG
  534 |        |     printf("DIGITS=%" PRIu64 "\n", v.mantissa);
  535 |        |     printf("OLEN=%u\n", olength);
  536 |        |     printf("EXP=%u\n", v.exponent + olength);
  537 |        | #endif
  538 |        |
  539 |        |     // Print the decimal digits.
  540 |        |     // The following code is equivalent to:
  541 |        |     // for (uint32_t i = 0; i < olength - 1; ++i) {
  542 |        |     //   const uint32_t c = output % 10; output /= 10;
  543 |        |     //   result[index + olength - i] = (char) ('0' + c);
  544 |        |     // }
  545 |        |     // result[index] = '0' + output % 10;
  546 |        |
  547 |  4.87M |     std::uint32_t i = 0;
  548 |        |     // We prefer 32-bit operations, even on 64-bit platforms.
  549 |        |     // We have at most 17 digits, and uint32_t can store 9 digits.
  550 |        |     // If output doesn't fit into uint32_t, we cut off 8 digits,
  551 |        |     // so the rest will fit into uint32_t.
  552 |  4.87M |     if ((output >> 32) != 0)
  553 |  4.76M |     {
  554 |        |         // Expensive 64-bit division.
  555 |  4.76M |         std::uint64_t const q = div1e8(output);
  556 |  4.76M |         std::uint32_t output2 = ((std::uint32_t)output) - 100000000 * ((std::uint32_t)q);
  557 |  4.76M |         output = q;
  558 |        |
  559 |  4.76M |         const std::uint32_t c = output2 % 10000;
  560 |  4.76M |         output2 /= 10000;
  561 |  4.76M |         const std::uint32_t d = output2 % 10000;
  562 |  4.76M |         const std::uint32_t c0 = (c % 100) << 1;
  563 |  4.76M |         const std::uint32_t c1 = (c / 100) << 1;
  564 |  4.76M |         const std::uint32_t d0 = (d % 100) << 1;
  565 |  4.76M |         const std::uint32_t d1 = (d / 100) << 1;
  566 |  4.76M |         std::memcpy(result + index + olength - i - 1, DIGIT_TABLE() + c0, 2);
  567 |  4.76M |         std::memcpy(result + index + olength - i - 3, DIGIT_TABLE() + c1, 2);
  568 |  4.76M |         std::memcpy(result + index + olength - i - 5, DIGIT_TABLE() + d0, 2);
  569 |  4.76M |         std::memcpy(result + index + olength - i - 7, DIGIT_TABLE() + d1, 2);
  570 |  4.76M |         i += 8;
  571 |  4.76M |     }
  572 |  4.87M |     uint32_t output2 = (std::uint32_t)output;
  573 |  14.3M |     while (output2 >= 10000)
  574 |  9.47M |     {
  575 |  9.47M | #ifdef __clang__ // https://bugs.llvm.org/show_bug.cgi?id=38217
  576 |  9.47M |         const uint32_t c = output2 - 10000 * (output2 / 10000);
  577 |        | #else
  578 |        |         const uint32_t c = output2 % 10000;
  579 |        | #endif
  580 |  9.47M |         output2 /= 10000;
  581 |  9.47M |         const uint32_t c0 = (c % 100) << 1;
  582 |  9.47M |         const uint32_t c1 = (c / 100) << 1;
  583 |  9.47M |         memcpy(result + index + olength - i - 1, DIGIT_TABLE() + c0, 2);
  584 |  9.47M |         memcpy(result + index + olength - i - 3, DIGIT_TABLE() + c1, 2);
  585 |  9.47M |         i += 4;
  586 |  9.47M |     }
  587 |  4.87M |     if (output2 >= 100) {
  588 |  52.2k |         const uint32_t c = (output2 % 100) << 1;
  589 |  52.2k |         output2 /= 100;
  590 |  52.2k |         memcpy(result + index + olength - i - 1, DIGIT_TABLE() + c, 2);
  591 |  52.2k |         i += 2;
  592 |  52.2k |     }
  593 |  4.87M |     if (output2 >= 10) {
  594 |  66.0k |         const uint32_t c = output2 << 1;
  595 |        |         // We can't use memcpy here: the decimal dot goes between these two digits.
  596 |  66.0k |         result[index + olength - i] = DIGIT_TABLE()[c + 1];
  597 |  66.0k |         result[index] = DIGIT_TABLE()[c];
  598 |  66.0k |     }
  599 |  4.80M |     else {
  600 |  4.80M |         result[index] = (char)('0' + output2);
  601 |  4.80M |     }
  602 |        |
  603 |        |     // Print decimal point if needed.
  604 |  4.87M |     if (olength > 1) {
  605 |  4.78M |         result[index + 1] = '.';
  606 |  4.78M |         index += olength + 1;
  607 |  4.78M |     }
  608 |  87.9k |     else {
  609 |  87.9k |         ++index;
  610 |  87.9k |     }
  611 |        |
  612 |        |     // Print the exponent.
  613 |  4.87M |     result[index++] = 'E';
  614 |  4.87M |     int32_t exp = v.exponent + (int32_t)olength - 1;
  615 |  4.87M |     if (exp < 0) {
  616 |  58.3k |         result[index++] = '-';
  617 |  58.3k |         exp = -exp;
  618 |  58.3k |     }
  619 |        |
  620 |  4.87M |     if (exp >= 100) {
  621 |  2.70k |         const int32_t c = exp % 10;
  622 |  2.70k |         memcpy(result + index, DIGIT_TABLE() + 2 * (exp / 10), 2);
  623 |  2.70k |         result[index + 2] = (char)('0' + c);
  624 |  2.70k |         index += 3;
  625 |  2.70k |     }
  626 |  4.87M |     else if (exp >= 10) {
  627 |  4.82M |         memcpy(result + index, DIGIT_TABLE() + 2 * exp, 2);
  628 |  4.82M |         index += 2;
  629 |  4.82M |     }
  630 |  48.6k |     else {
  631 |  48.6k |         result[index++] = (char)('0' + exp);
  632 |  48.6k |     }
  633 |        |
  634 |  4.87M |     return index;
  635 |  4.87M | }
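
to_chars above copies digits two at a time out of DIGIT_TABLE(), which it indexes by 2 * value, i.e. a table of the two-character pairs "00" through "99" laid back to back. The trick with a local stand-in table, so the sketch is self-contained (not the library's table from digit_table.hpp):

#include <cstdio>
#include <cstring>

// Local stand-in for the two-digit lookup table: "00", "01", ..., "99"
// concatenated, so pair k starts at offset 2 * k.
static char const kDigits2[201] =
    "00010203040506070809101112131415161718192021222324"
    "25262728293031323334353637383940414243444546474849"
    "50515253545556575859606162636465666768697071727374"
    "75767778798081828384858687888990919293949596979899";

int main()
{
    char buf[3] = {};
    unsigned const value = 47;                 // any two-digit chunk
    std::memcpy(buf, kDigits2 + 2 * value, 2); // copies '4' and '7' in one go
    std::printf("%s\n", buf);                  // prints "47"
    return 0;
}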
  636 |        |
  637 |        | static inline bool d2d_small_int(const uint64_t ieeeMantissa, const uint32_t ieeeExponent,
  638 |  4.87M |   floating_decimal_64* const v) {
  639 |  4.87M |   const uint64_t m2 = (1ull << DOUBLE_MANTISSA_BITS) | ieeeMantissa;
  640 |  4.87M |   const int32_t e2 = (int32_t) ieeeExponent - DOUBLE_BIAS - DOUBLE_MANTISSA_BITS;
  641 |        |
  642 |  4.87M |   if (e2 > 0) {
  643 |        |     // f = m2 * 2^e2 >= 2^53 is an integer.
  644 |        |     // Ignore this case for now.
  645 |  4.76M |     return false;
  646 |  4.76M |   }
  647 |        |
  648 |   107k |   if (e2 < -52) {
  649 |        |     // f < 1.
  650 |  58.3k |     return false;
  651 |  58.3k |   }
  652 |        |
  653 |        |   // Since 2^52 <= m2 < 2^53 and 0 <= -e2 <= 52: 1 <= f = m2 / 2^-e2 < 2^53.
  654 |        |   // Test if the lower -e2 bits of the significand are 0, i.e. whether the fraction is 0.
  655 |  49.2k |   const uint64_t mask = (1ull << -e2) - 1;
  656 |  49.2k |   const uint64_t fraction = m2 & mask;
  657 |  49.2k |   if (fraction != 0) {
  658 |  8.35k |     return false;
  659 |  8.35k |   }
  660 |        |
  661 |        |   // f is an integer in the range [1, 2^53).
  662 |        |   // Note: mantissa might contain trailing (decimal) 0's.
  663 |        |   // Note: since 2^53 < 10^16, there is no need to adjust decimalLength17().
  664 |  40.9k |   v->mantissa = m2 >> -e2;
  665 |  40.9k |   v->exponent = 0;
  666 |  40.9k |   return true;
  667 |  49.2k | }
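
A concrete pass through the test above, for the double 100.0: the exponent field is 1029, so e2 = 1029 - 1023 - 52 = -46; m2 = 25 * 2^48, whose low 46 bits are all zero; the function therefore reports mantissa 100 with exponent 0, and the trailing-zero loop in d2s_buffered_n below turns that into 1E2. The same arithmetic spelled out (standalone sketch with the IEEE constants hard-coded instead of the DOUBLE_* macros):

#include <cassert>
#include <cstdint>
#include <cstring>

int main()
{
    // 52 mantissa bits, exponent bias 1023: the values behind
    // DOUBLE_MANTISSA_BITS and DOUBLE_BIAS above.
    double const value = 100.0;
    std::uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);

    std::uint64_t const ieeeMantissa = bits & ((1ull << 52) - 1);
    std::uint32_t const ieeeExponent = (std::uint32_t)((bits >> 52) & 0x7FF);

    std::uint64_t const m2 = (1ull << 52) | ieeeMantissa;           // 25 * 2^48
    std::int32_t const e2 = (std::int32_t)ieeeExponent - 1023 - 52; // -46

    assert(e2 == -46);
    assert((m2 & ((1ull << -e2) - 1)) == 0); // no fractional bits: 100.0 is an integer
    assert((m2 >> -e2) == 100);              // the fast path yields mantissa 100, exponent 0
    return 0;
}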
  668 |        |
  669 |        | } // detail
  670 |        |
  671 |        | int
  672 |        | d2s_buffered_n(
  673 |        |     double f,
  674 |        |     char* result) noexcept
  675 |  4.91M | {
  676 |  4.91M |     using namespace detail;
  677 |        |     // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
  678 |  4.91M |     std::uint64_t const bits = double_to_bits(f);
  679 |        |
  680 |        | #ifdef RYU_DEBUG
  681 |        |     printf("IN=");
  682 |        |     for (std::int32_t bit = 63; bit >= 0; --bit) {
  683 |        |         printf("%d", (int)((bits >> bit) & 1));
  684 |        |     }
  685 |        |     printf("\n");
  686 |        | #endif
  687 |        |
  688 |        |     // Decode bits into sign, mantissa, and exponent.
  689 |  4.91M |     const bool ieeeSign = ((bits >> (DOUBLE_MANTISSA_BITS + DOUBLE_EXPONENT_BITS)) & 1) != 0;
  690 |  4.91M |     const std::uint64_t ieeeMantissa = bits & ((1ull << DOUBLE_MANTISSA_BITS) - 1);
  691 |  4.91M |     const std::uint32_t ieeeExponent = (std::uint32_t)((bits >> DOUBLE_MANTISSA_BITS) & ((1u << DOUBLE_EXPONENT_BITS) - 1));
  692 |        |     // Case distinction; exit early for the easy cases.
  693 |  4.91M |     if (ieeeExponent == ((1u << DOUBLE_EXPONENT_BITS) - 1u) || (ieeeExponent == 0 && ieeeMantissa == 0)) {
  694 |  45.6k |         return copy_special_str(result, ieeeSign, ieeeExponent != 0, ieeeMantissa != 0);
  695 |  45.6k |     }
  696 |        |
  697 |  4.87M |     floating_decimal_64 v;
  698 |  4.87M |     const bool isSmallInt = d2d_small_int(ieeeMantissa, ieeeExponent, &v);
  699 |  4.87M |     if (isSmallInt) {
  700 |        |         // For small integers in the range [1, 2^53), v.mantissa might contain trailing (decimal) zeros.
  701 |        |         // For scientific notation we need to move these zeros into the exponent.
  702 |        |         // (This is not needed for fixed-point notation, so it might be beneficial to trim
  703 |        |         // trailing zeros in to_chars only if needed - once fixed-point notation output is implemented.)
  704 |   351k |         for (;;) {
  705 |   351k |             std::uint64_t const q = div10(v.mantissa);
  706 |   351k |             std::uint32_t const r = ((std::uint32_t) v.mantissa) - 10 * ((std::uint32_t) q);
  707 |   351k |             if (r != 0)
  708 |  40.9k |                 break;
  709 |   310k |             v.mantissa = q;
  710 |   310k |             ++v.exponent;
  711 |   310k |         }
  712 |  40.9k |     }
  713 |  4.83M |     else {
  714 |  4.83M |         v = d2d(ieeeMantissa, ieeeExponent);
  715 |  4.83M |     }
  716 |        |
  717 |  4.87M |     return to_chars(v, ieeeSign, result);
  718 |  4.91M | }
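
A minimal usage sketch for the function above: it writes an unterminated string and returns the length, so the caller adds the terminator. The strtod comparison restates the guarantee the whole file exists for, namely that the shortest output parses back to the identical double. Assumptions: the Boost.JSON headers are on the include path and set up the way the library expects (e.g. header-only use), the platform's strtod is correctly rounded, and 32 bytes is comfortably more buffer than any output needs.

#include <boost/json/detail/ryu/ryu.hpp>

#include <cassert>
#include <cstdio>
#include <cstdlib>

int main()
{
    double const samples[] = { 1.5, 100.0, 0.3, -2.5e-8 };
    for (double const d : samples)
    {
        char buf[32];
        int const n = boost::json::detail::ryu::d2s_buffered_n(d, buf);
        buf[n] = '\0'; // d2s_buffered_n does not write a terminator
        std::printf("%g -> %s\n", d, buf);

        // Round-trip property: the shortest form recovers the exact double.
        assert(std::strtod(buf, nullptr) == d);
    }
    return 0;
}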
  719 |        |
  720 |        | } // ryu
  721 |        |
  722 |        | } // detail
  723 |        | } // namespace json
  724 |        | } // namespace boost
  725 |        |
  726 |        | #endif