/rust/registry/src/index.crates.io-6f17d22bba15001f/half-2.4.1/src/bfloat/convert.rs
use crate::leading_zeros::leading_zeros_u16;
use core::mem;

#[inline]
pub(crate) const fn f32_to_bf16(value: f32) -> u16 {
    // TODO: Replace mem::transmute with to_bits() once to_bits is const-stabilized
    // Convert to raw bytes
    let x: u32 = unsafe { mem::transmute::<f32, u32>(value) };

    // Check for NaN
    if x & 0x7FFF_FFFFu32 > 0x7F80_0000u32 {
        // Keep high part of current mantissa but also set most significant mantissa bit
        return ((x >> 16) | 0x0040u32) as u16;
    }

    // Round and shift: round to nearest, ties to even. The second check looks at the
    // result's LSB together with all bits below the round bit, so exact halfway cases
    // only round up when truncation would leave an odd result.
    let round_bit = 0x0000_8000u32;
    if (x & round_bit) != 0 && (x & (3 * round_bit - 1)) != 0 {
        (x >> 16) as u16 + 1
    } else {
        (x >> 16) as u16
    }
}
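
// Illustrative sketch, not part of the original crate: a spot check of the
// round-to-nearest-even behaviour of f32_to_bf16 above. The test module name
// is hypothetical.
#[cfg(test)]
mod f32_to_bf16_rounding_sketch {
    use super::f32_to_bf16;

    #[test]
    fn rounds_to_nearest_even() {
        // 1.0f32 is 0x3F80_0000; the low 16 bits are zero, so bf16 keeps the high half.
        assert_eq!(f32_to_bf16(1.0), 0x3F80);
        // Exactly halfway between 0x3F80 and 0x3F81: ties go to the even neighbour.
        assert_eq!(f32_to_bf16(f32::from_bits(0x3F80_8000)), 0x3F80);
        // Just above the halfway point rounds up.
        assert_eq!(f32_to_bf16(f32::from_bits(0x3F80_8001)), 0x3F81);
        // Halfway with an odd truncated result also rounds up, to the even 0x3F82.
        assert_eq!(f32_to_bf16(f32::from_bits(0x3F81_8000)), 0x3F82);
    }
}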

#[inline]
pub(crate) const fn f64_to_bf16(value: f64) -> u16 {
    // TODO: Replace mem::transmute with to_bits() once to_bits is const-stabilized
    // Convert to raw bytes, truncating the low 32 bits of the mantissa; that precision
    // will always be lost in bfloat16.
    let val: u64 = unsafe { mem::transmute::<f64, u64>(value) };
    let x = (val >> 32) as u32;

    // Extract IEEE 754 components
    let sign = x & 0x8000_0000u32;
    let exp = x & 0x7FF0_0000u32;
    let man = x & 0x000F_FFFFu32;

    // Check for all exponent bits being set, which is Infinity or NaN
    if exp == 0x7FF0_0000u32 {
        // Set mantissa MSB for NaN (and also keep shifted mantissa bits).
        // We also have to check the last 32 bits.
        let nan_bit = if man == 0 && (val as u32 == 0) {
            0
        } else {
            0x0040u32
        };
        return ((sign >> 16) | 0x7F80u32 | nan_bit | (man >> 13)) as u16;
    }

    // The number is normalized, start assembling the bfloat16 version
    let half_sign = sign >> 16;
    // Unbias the exponent, then bias for bfloat16 precision
    let unbiased_exp = ((exp >> 20) as i64) - 1023;
    let half_exp = unbiased_exp + 127;

    // Check for exponent overflow, return +infinity
    if half_exp >= 0xFF {
        return (half_sign | 0x7F80u32) as u16;
    }

    // Check for underflow
    if half_exp <= 0 {
        // Check mantissa for what we can do
        if 7 - half_exp > 21 {
            // No rounding possibility, so this is a full underflow, return signed zero
            return half_sign as u16;
        }
        // Don't forget about hidden leading mantissa bit when assembling mantissa
        let man = man | 0x0010_0000u32;
        let mut half_man = man >> (14 - half_exp);
        // Check for rounding (round to nearest, ties to even)
        let round_bit = 1 << (13 - half_exp);
        if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
            half_man += 1;
        }
        // No exponent for subnormals
        return (half_sign | half_man) as u16;
    }

    // Rebias the exponent
    let half_exp = (half_exp as u32) << 7;
    let half_man = man >> 13;
    // Check for rounding (round to nearest, ties to even)
    let round_bit = 0x0000_1000u32;
    if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
        // Round it
        ((half_sign | half_exp | half_man) + 1) as u16
    } else {
        (half_sign | half_exp | half_man) as u16
    }
}
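
// Illustrative sketch, not part of the original crate: spot checks for the f64
// path above, covering the exactly-representable, infinity, and NaN branches.
// The test module name is hypothetical.
#[cfg(test)]
mod f64_to_bf16_sketch {
    use super::f64_to_bf16;

    #[test]
    fn handles_special_values() {
        // 1.0 is exactly representable in bf16 as 0x3F80.
        assert_eq!(f64_to_bf16(1.0), 0x3F80);
        // +infinity: sign 0, all exponent bits set, zero mantissa.
        assert_eq!(f64_to_bf16(f64::INFINITY), 0x7F80);
        // Any f64 NaN must remain a NaN: exponent all ones, mantissa non-zero.
        let nan = f64_to_bf16(f64::NAN);
        assert_eq!(nan & 0x7F80, 0x7F80);
        assert_ne!(nan & 0x007F, 0);
    }
}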

#[inline]
pub(crate) const fn bf16_to_f32(i: u16) -> f32 {
    // TODO: Replace mem::transmute with from_bits() once from_bits is const-stabilized
    // If NaN, keep current mantissa but also set most significant mantissa bit
    if i & 0x7FFFu16 > 0x7F80u16 {
        unsafe { mem::transmute::<u32, f32>((i as u32 | 0x0040u32) << 16) }
    } else {
        unsafe { mem::transmute::<u32, f32>((i as u32) << 16) }
    }
}
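
// Illustrative sketch, not part of the original crate: for everything except NaN,
// bf16_to_f32 above is just a 16-bit left shift of the encoding. The test module
// name is hypothetical.
#[cfg(test)]
mod bf16_to_f32_sketch {
    use super::bf16_to_f32;

    #[test]
    fn widens_by_shifting() {
        assert_eq!(bf16_to_f32(0x3F80), 1.0f32);
        assert_eq!(bf16_to_f32(0xC000), -2.0f32);
        assert_eq!(bf16_to_f32(0x7F80), f32::INFINITY);
        assert!(bf16_to_f32(0x7FC1).is_nan());
    }
}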

#[inline]
pub(crate) const fn bf16_to_f64(i: u16) -> f64 {
    // TODO: Replace mem::transmute with from_bits() once from_bits is const-stabilized
    // Check for signed zero
    if i & 0x7FFFu16 == 0 {
        return unsafe { mem::transmute::<u64, f64>((i as u64) << 48) };
    }

    let half_sign = (i & 0x8000u16) as u64;
    let half_exp = (i & 0x7F80u16) as u64;
    let half_man = (i & 0x007Fu16) as u64;

    // Check for an infinity or NaN when all exponent bits set
    if half_exp == 0x7F80u64 {
        // Check for signed infinity if mantissa is zero
        if half_man == 0 {
            return unsafe {
                mem::transmute::<u64, f64>((half_sign << 48) | 0x7FF0_0000_0000_0000u64)
            };
        } else {
            // NaN, keep current mantissa but also set most significant mantissa bit
            return unsafe {
                mem::transmute::<u64, f64>(
                    (half_sign << 48) | 0x7FF8_0000_0000_0000u64 | (half_man << 45),
                )
            };
        }
    }

    // Calculate double-precision components with adjusted exponent
    let sign = half_sign << 48;
    // Unbias exponent
    let unbiased_exp = ((half_exp as i64) >> 7) - 127;

    // Check for subnormals, which will be normalized by adjusting exponent
    if half_exp == 0 {
        // Calculate how much to adjust the exponent by
        let e = leading_zeros_u16(half_man as u16) - 9;

        // Rebias and adjust exponent
        let exp = ((1023 - 127 - e) as u64) << 52;
        let man = (half_man << (46 + e)) & 0xF_FFFF_FFFF_FFFFu64;
        return unsafe { mem::transmute::<u64, f64>(sign | exp | man) };
    }
    // Rebias exponent for a normal number
    let exp = ((unbiased_exp + 1023) as u64) << 52;
    let man = (half_man & 0x007Fu64) << 45;
    unsafe { mem::transmute::<u64, f64>(sign | exp | man) }
}
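
// Illustrative sketch, not part of the original crate: spot checks for the widening
// conversion above, including the subnormal renormalization path. The test module
// name is hypothetical.
#[cfg(test)]
mod bf16_to_f64_sketch {
    use super::bf16_to_f64;

    #[test]
    fn widens_and_normalizes() {
        assert_eq!(bf16_to_f64(0x3F80), 1.0);
        // The sign of zero is preserved.
        assert_eq!(bf16_to_f64(0x8000).to_bits(), (-0.0f64).to_bits());
        // Smallest positive bf16 subnormal, 0x0001, is 2^-133; as an f64 it becomes
        // a normal number with biased exponent 1023 - 133 = 890 and zero mantissa.
        assert_eq!(bf16_to_f64(0x0001), f64::from_bits(890u64 << 52));
    }
}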