/src/abseil-cpp/absl/base/internal/endian.h
Line | Count | Source |
1 | | // Copyright 2017 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // This file is for Abseil internal use only. |
16 | | // See //absl/numeric/bits.h for supported functions related to endian-ness. |
17 | | |
18 | | #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ |
19 | | #define ABSL_BASE_INTERNAL_ENDIAN_H_ |
20 | | |
21 | | #include <cstdint> |
22 | | #include <cstdlib> |
23 | | |
24 | | #include "absl/base/casts.h" |
25 | | #include "absl/base/config.h" |
26 | | #include "absl/base/internal/unaligned_access.h" |
27 | | #include "absl/base/nullability.h" |
28 | | #include "absl/base/port.h" |
29 | | |
30 | | namespace absl { |
31 | | ABSL_NAMESPACE_BEGIN |
32 | | |
33 | 0 | constexpr uint64_t gbswap_64(uint64_t x) { |
34 | 0 | #if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) |
35 | 0 | return __builtin_bswap64(x); |
36 | 0 | #else |
37 | 0 | return (((x & uint64_t{0xFF}) << 56) | |
38 | 0 | ((x & uint64_t{0xFF00}) << 40) | |
39 | 0 | ((x & uint64_t{0xFF0000}) << 24) | |
40 | 0 | ((x & uint64_t{0xFF000000}) << 8) | |
41 | 0 | ((x & uint64_t{0xFF00000000}) >> 8) | |
42 | 0 | ((x & uint64_t{0xFF0000000000}) >> 24) | |
43 | 0 | ((x & uint64_t{0xFF000000000000}) >> 40) | |
44 | 0 | ((x & uint64_t{0xFF00000000000000}) >> 56)); |
45 | 0 | #endif |
46 | 0 | } |
47 | | |
48 | 0 | constexpr uint32_t gbswap_32(uint32_t x) { |
49 | 0 | #if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) |
50 | 0 | return __builtin_bswap32(x); |
51 | 0 | #else |
52 | 0 | return (((x & uint32_t{0xFF}) << 24) | |
53 | 0 | ((x & uint32_t{0xFF00}) << 8) | |
54 | 0 | ((x & uint32_t{0xFF0000}) >> 8) | |
55 | 0 | ((x & uint32_t{0xFF000000}) >> 24)); |
56 | 0 | #endif |
57 | 0 | } |
58 | | |
59 | 0 | constexpr uint16_t gbswap_16(uint16_t x) { |
60 | 0 | #if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) |
61 | 0 | return __builtin_bswap16(x); |
62 | 0 | #else |
63 | 0 | return (((x & uint16_t{0xFF}) << 8) | |
64 | 0 | ((x & uint16_t{0xFF00}) >> 8)); |
65 | 0 | #endif |
66 | 0 | } |
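
The three byte-swap helpers above prefer the compiler builtin and fall back to a mask-and-shift composition that optimizers typically fold into a single bswap instruction. A minimal standalone sketch of the same 32-bit transform, assuming only <cstdint> (the name bswap32 and the static_assert harness are illustrative, not part of this header):

    #include <cstdint>

    // Reverse the four bytes of a 32-bit value: 0xAABBCCDD -> 0xDDCCBBAA.
    constexpr uint32_t bswap32(uint32_t x) {
      return ((x & 0xFFu) << 24) | ((x & 0xFF00u) << 8) |
             ((x & 0xFF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
    }
    static_assert(bswap32(0xAABBCCDDu) == 0xDDCCBBAAu, "bytes reversed");
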
67 | | |
68 | | #ifdef ABSL_IS_LITTLE_ENDIAN |
69 | | |
70 | | // Portable definitions for htonl (host-to-network) and friends on little-endian |
71 | | // architectures. |
72 | 0 | inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } |
73 | 0 | inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } |
74 | 0 | inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } |
75 | | |
76 | | #elif defined ABSL_IS_BIG_ENDIAN |
77 | | |
78 | | // Portable definitions for htonl (host-to-network) etc on big-endian |
79 | | // architectures. These definitions are simpler since the host byte order is the |
80 | | // same as network byte order. |
81 | | inline uint16_t ghtons(uint16_t x) { return x; } |
82 | | inline uint32_t ghtonl(uint32_t x) { return x; } |
83 | | inline uint64_t ghtonll(uint64_t x) { return x; } |
84 | | |
85 | | #else |
86 | | #error \ |
87 | | "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ |
88 | | "ABSL_IS_LITTLE_ENDIAN must be defined" |
89 | | #endif // byte order |
90 | | |
91 | 0 | inline uint16_t gntohs(uint16_t x) { return ghtons(x); } |
92 | 0 | inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } |
93 | 0 | inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } |
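
Since gntohs/gntohl/gntohll simply forward to their ghton* counterparts, a host-to-network conversion followed by network-to-host is the identity on either byte order. A hedged usage sketch (the header is Abseil-internal, so this is illustrative only; RoundTrip is a hypothetical caller):

    #include <cstdint>
    #include "absl/base/internal/endian.h"

    // Convert a value to network (big-endian) order and back; the result
    // equals the input on both little- and big-endian hosts.
    uint32_t RoundTrip(uint32_t host_value) {
      uint32_t wire = absl::ghtonl(host_value);  // host -> network order
      return absl::gntohl(wire);                 // network -> host order
    }
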
94 | | |
95 | | // Utilities to convert numbers between the current host's native byte |
96 | | // order and little-endian byte order |
97 | | // |
98 | | // Load/Store methods are alignment safe |
99 | | namespace little_endian { |
100 | | // Conversion functions. |
101 | | #ifdef ABSL_IS_LITTLE_ENDIAN |
102 | | |
103 | 0 | inline uint16_t FromHost16(uint16_t x) { return x; } |
104 | 0 | inline uint16_t ToHost16(uint16_t x) { return x; } |
105 | | |
106 | 0 | inline uint32_t FromHost32(uint32_t x) { return x; } |
107 | 0 | inline uint32_t ToHost32(uint32_t x) { return x; } |
108 | | |
109 | 172k | inline uint64_t FromHost64(uint64_t x) { return x; } |
110 | 60.5k | inline uint64_t ToHost64(uint64_t x) { return x; } |
111 | | |
112 | 0 | inline constexpr bool IsLittleEndian() { return true; } |
113 | | |
114 | | #elif defined ABSL_IS_BIG_ENDIAN |
115 | | |
116 | | inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } |
117 | | inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } |
118 | | |
119 | | inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } |
120 | | inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } |
121 | | |
122 | | inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } |
123 | | inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } |
124 | | |
125 | | inline constexpr bool IsLittleEndian() { return false; } |
126 | | |
127 | | #endif /* ENDIAN */ |
128 | | |
129 | 0 | inline uint8_t FromHost(uint8_t x) { return x; } |
130 | 0 | inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } |
131 | 0 | inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } |
132 | 0 | inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } |
133 | 0 | inline uint8_t ToHost(uint8_t x) { return x; } |
134 | 0 | inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } |
135 | 0 | inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } |
136 | 0 | inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } |
137 | | |
138 | 0 | inline int8_t FromHost(int8_t x) { return x; } |
139 | 0 | inline int16_t FromHost(int16_t x) { |
140 | 0 | return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x))); |
141 | 0 | } |
142 | 0 | inline int32_t FromHost(int32_t x) { |
143 | 0 | return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x))); |
144 | 0 | } |
145 | 0 | inline int64_t FromHost(int64_t x) { |
146 | 0 | return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x))); |
147 | 0 | } |
148 | 0 | inline int8_t ToHost(int8_t x) { return x; } |
149 | 0 | inline int16_t ToHost(int16_t x) { |
150 | 0 | return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x))); |
151 | 0 | } |
152 | 0 | inline int32_t ToHost(int32_t x) { |
153 | 0 | return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x))); |
154 | 0 | } |
155 | 0 | inline int64_t ToHost(int64_t x) { |
156 | 0 | return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x))); |
157 | 0 | } |
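
The signed overloads route through bit_cast so the byte swap happens on an unsigned value, avoiding implementation-defined behavior from shifting negative numbers; the swapped bits are then reinterpreted back to the signed type. A short hypothetical use (the function and variable names are illustrative):

    #include <cstdint>
    #include "absl/base/internal/endian.h"

    // A negative value survives the little-endian round trip unchanged,
    // because both directions swap (or pass through) the same bytes.
    int32_t Identity(int32_t counter) {
      int32_t wire = absl::little_endian::FromHost(counter);
      return absl::little_endian::ToHost(wire);  // == counter on any host
    }
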
158 | | |
159 | | // Functions to do unaligned loads and stores in little-endian order. |
160 | 0 | inline uint16_t Load16(const void* absl_nonnull p) { |
161 | 0 | return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); |
162 | 0 | } |
163 | | |
164 | 0 | inline void Store16(void* absl_nonnull p, uint16_t v) { |
165 | 0 | ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); |
166 | 0 | } |
167 | | |
168 | 0 | inline uint32_t Load32(const void* absl_nonnull p) { |
169 | 0 | return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); |
170 | 0 | } |
171 | | |
172 | 0 | inline void Store32(void* absl_nonnull p, uint32_t v) { |
173 | 0 | ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); |
174 | 0 | } |
175 | | |
176 | 60.5k | inline uint64_t Load64(const void* absl_nonnull p) { |
177 | 60.5k | return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); |
178 | 60.5k | } |
179 | | |
180 | 172k | inline void Store64(void* absl_nonnull p, uint64_t v) { |
181 | 172k | ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); |
182 | 172k | } |
183 | | |
184 | | } // namespace little_endian |
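
The Load/Store functions are the hot path in this report (Load64 and Store64 carry all of the nonzero counts) because they combine the byte-order conversion with an alignment-safe memory access. A hedged serialization sketch (the buffer layout and names are assumptions, not from the header):

    #include <cstdint>
    #include "absl/base/internal/endian.h"

    // Write a 64-bit value at an unaligned offset and read it back; both
    // calls are safe regardless of the pointer's alignment.
    uint64_t RoundTripAt(uint64_t v) {
      char buf[16] = {0};
      absl::little_endian::Store64(buf + 3, v);     // unaligned store
      return absl::little_endian::Load64(buf + 3);  // returns v
    }
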
185 | | |
186 | | // Utilities to convert numbers between the current host's native byte |
187 | | // order and big-endian byte order (same as network byte order) |
188 | | // |
189 | | // Load/Store methods are alignment safe |
190 | | namespace big_endian { |
191 | | #ifdef ABSL_IS_LITTLE_ENDIAN |
192 | | |
193 | 0 | inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } |
194 | 0 | inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } |
195 | | |
196 | 0 | inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } |
197 | 0 | inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } |
198 | | |
199 | 0 | inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } |
200 | 0 | inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } |
201 | | |
202 | 0 | inline constexpr bool IsLittleEndian() { return true; } |
203 | | |
204 | | #elif defined ABSL_IS_BIG_ENDIAN |
205 | | |
206 | | inline uint16_t FromHost16(uint16_t x) { return x; } |
207 | | inline uint16_t ToHost16(uint16_t x) { return x; } |
208 | | |
209 | | inline uint32_t FromHost32(uint32_t x) { return x; } |
210 | | inline uint32_t ToHost32(uint32_t x) { return x; } |
211 | | |
212 | | inline uint64_t FromHost64(uint64_t x) { return x; } |
213 | | inline uint64_t ToHost64(uint64_t x) { return x; } |
214 | | |
215 | | inline constexpr bool IsLittleEndian() { return false; } |
216 | | |
217 | | #endif /* ENDIAN */ |
218 | | |
219 | 0 | inline uint8_t FromHost(uint8_t x) { return x; } |
220 | 0 | inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } |
221 | 0 | inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } |
222 | 0 | inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } |
223 | 0 | inline uint8_t ToHost(uint8_t x) { return x; } |
224 | 0 | inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } |
225 | 0 | inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } |
226 | 0 | inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } |
227 | | |
228 | 0 | inline int8_t FromHost(int8_t x) { return x; } |
229 | 0 | inline int16_t FromHost(int16_t x) { |
230 | 0 | return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x))); |
231 | 0 | } |
232 | 0 | inline int32_t FromHost(int32_t x) { |
233 | 0 | return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x))); |
234 | 0 | } |
235 | 0 | inline int64_t FromHost(int64_t x) { |
236 | 0 | return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x))); |
237 | 0 | } |
238 | 0 | inline int8_t ToHost(int8_t x) { return x; } |
239 | 0 | inline int16_t ToHost(int16_t x) { |
240 | 0 | return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x))); |
241 | 0 | } |
242 | 0 | inline int32_t ToHost(int32_t x) { |
243 | 0 | return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x))); |
244 | 0 | } |
245 | 0 | inline int64_t ToHost(int64_t x) { |
246 | 0 | return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x))); |
247 | 0 | } |
248 | | |
249 | | // Functions to do unaligned loads and stores in big-endian order. |
250 | 0 | inline uint16_t Load16(const void* absl_nonnull p) { |
251 | 0 | return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); |
252 | 0 | } |
253 | | |
254 | 0 | inline void Store16(void* absl_nonnull p, uint16_t v) { |
255 | 0 | ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); |
256 | 0 | } |
257 | | |
258 | 0 | inline uint32_t Load32(const void* absl_nonnull p) { |
259 | 0 | return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); |
260 | 0 | } |
261 | | |
262 | 0 | inline void Store32(void* absl_nonnull p, uint32_t v) { |
263 | 0 | ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); |
264 | 0 | } |
265 | | |
266 | 0 | inline uint64_t Load64(const void* absl_nonnull p) { |
267 | 0 | return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); |
268 | 0 | } |
269 | | |
270 | 0 | inline void Store64(void* absl_nonnull p, uint64_t v) { |
271 | 0 | ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); |
272 | 0 | } |
273 | | |
274 | | } // namespace big_endian |
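
big_endian mirrors little_endian with the swap applied on little-endian hosts instead, which makes it the natural choice for wire formats that use network byte order. A hedged decoding sketch (the packet layout and DecodePort are hypothetical):

    #include <cstdint>
    #include "absl/base/internal/endian.h"

    // Read a 16-bit port stored in network (big-endian) order at byte
    // offset 2 of a raw packet; bytes {0x1F, 0x90} decode to 8080.
    uint16_t DecodePort(const unsigned char* packet) {
      return absl::big_endian::Load16(packet + 2);
    }
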
275 | | |
276 | | ABSL_NAMESPACE_END |
277 | | } // namespace absl |
278 | | |
279 | | #endif // ABSL_BASE_INTERNAL_ENDIAN_H_ |