/src/LPM/external.protobuf/include/absl/base/internal/endian.h
Line  | Count  | Source (jump to first uncovered line)  | 
1  |  | // Copyright 2017 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  | //  | 
15  |  |  | 
16  |  | #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_  | 
17  |  | #define ABSL_BASE_INTERNAL_ENDIAN_H_  | 
18  |  |  | 
19  |  | #include <cstdint>  | 
20  |  | #include <cstdlib>  | 
21  |  |  | 
22  |  | #include "absl/base/casts.h"  | 
23  |  | #include "absl/base/config.h"  | 
24  |  | #include "absl/base/internal/unaligned_access.h"  | 
25  |  | #include "absl/base/nullability.h"  | 
26  |  | #include "absl/base/port.h"  | 
27  |  |  | 
28  |  | namespace absl { | 
29  |  | ABSL_NAMESPACE_BEGIN  | 
30  |  |  | 
// Byte-swaps a 64-bit value: reverses the order of its eight bytes.
// Prefers a compiler intrinsic (GCC/Clang builtin, or MSVC's library
// byteswap); otherwise uses a portable mask-and-shift fallback.
inline uint64_t gbswap_64(uint64_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
  return __builtin_bswap64(host_int);
#elif defined(_MSC_VER)
  return _byteswap_uint64(host_int);
#else
  // Portable fallback: isolate each byte and shift it to its mirrored
  // position (byte 0 <-> byte 7, byte 1 <-> byte 6, and so on).
  return (((host_int & uint64_t{0xFF}) << 56) |
          ((host_int & uint64_t{0xFF00}) << 40) |
          ((host_int & uint64_t{0xFF0000}) << 24) |
          ((host_int & uint64_t{0xFF000000}) << 8) |
          ((host_int & uint64_t{0xFF00000000}) >> 8) |
          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif
}
47  |  |  | 
// Byte-swaps a 32-bit value: reverses the order of its four bytes.
// Prefers a compiler intrinsic where available; otherwise uses a portable
// mask-and-shift fallback.
inline uint32_t gbswap_32(uint32_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
  return __builtin_bswap32(host_int);
#elif defined(_MSC_VER)
  return _byteswap_ulong(host_int);
#else
  // Portable fallback: move each byte to its mirrored position.
  return (((host_int & uint32_t{0xFF}) << 24) |
          ((host_int & uint32_t{0xFF00}) << 8) |
          ((host_int & uint32_t{0xFF0000}) >> 8) |
          ((host_int & uint32_t{0xFF000000}) >> 24));
#endif
}
60  |  |  | 
// Byte-swaps a 16-bit value: exchanges its two bytes.
// Prefers a compiler intrinsic where available; otherwise uses a portable
// mask-and-shift fallback.
inline uint16_t gbswap_16(uint16_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
  return __builtin_bswap16(host_int);
#elif defined(_MSC_VER)
  return _byteswap_ushort(host_int);
#else
  // Portable fallback: swap the low and high bytes.
  return (((host_int & uint16_t{0xFF}) << 8) |
          ((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
71  |  |  | 
// Host-to-network conversions (network order is big-endian). The definitions
// below are selected at preprocessing time from the endianness macros set in
// absl/base/config.h.
#ifdef ABSL_IS_LITTLE_ENDIAN

// Portable definitions for htonl (host-to-network) and friends on little-endian
// architectures: each conversion is a byte swap.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }

#elif defined ABSL_IS_BIG_ENDIAN

// Portable definitions for htonl (host-to-network) etc on big-endian
// architectures. These definitions are simpler since the host byte order is the
// same as network byte order.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }

#else
#error \
    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
       "ABSL_IS_LITTLE_ENDIAN must be defined"
#endif  // byte order
94  |  |  | 
95  | 0  | inline uint16_t gntohs(uint16_t x) { return ghtons(x); } | 
96  | 0  | inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } | 
97  | 0  | inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } | 
98  |  |  | 
// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
// Conversions between little-endian byte order and host byte order, plus
// alignment-safe little-endian loads and stores.
namespace little_endian {
// Conversion functions.
#ifdef ABSL_IS_LITTLE_ENDIAN

// Little-endian host: conversions to/from little-endian are the identity.
inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

// Reports the endianness of the host (true here), not of this namespace's
// target byte order.
inline constexpr bool IsLittleEndian() { return true; }

#elif defined ABSL_IS_BIG_ENDIAN

// Big-endian host: conversions to/from little-endian require a byte swap.
inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Unsigned overloads that dispatch to the width-specific conversion.
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }

// Signed overloads: round-trip through the unsigned representation via
// bit_cast so byte-swapping negative values is well defined.
inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}

// Functions to do unaligned loads and stores in little-endian order.
// The ABSL_INTERNAL_UNALIGNED_* macros perform the alignment-safe memory
// access; these wrappers add the byte-order conversion.
inline uint16_t Load16(absl::Nonnull<const void *> p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(absl::Nonnull<const void *> p) {
  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(absl::Nonnull<const void *> p) {
  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace little_endian
189  |  |  | 
// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
194  |  | namespace big_endian { | 
195  |  | #ifdef ABSL_IS_LITTLE_ENDIAN  | 
196  |  |  | 
197  | 0  | inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } | 
198  | 0  | inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } | 
199  |  |  | 
200  | 0  | inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } | 
201  | 0  | inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } | 
202  |  |  | 
203  | 0  | inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } | 
204  | 0  | inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } | 
205  |  |  | 
206  | 0  | inline constexpr bool IsLittleEndian() { return true; } | 
207  |  |  | 
208  |  | #elif defined ABSL_IS_BIG_ENDIAN  | 
209  |  |  | 
210  |  | inline uint16_t FromHost16(uint16_t x) { return x; } | 
211  |  | inline uint16_t ToHost16(uint16_t x) { return x; } | 
212  |  |  | 
213  |  | inline uint32_t FromHost32(uint32_t x) { return x; } | 
214  |  | inline uint32_t ToHost32(uint32_t x) { return x; } | 
215  |  |  | 
216  |  | inline uint64_t FromHost64(uint64_t x) { return x; } | 
217  |  | inline uint64_t ToHost64(uint64_t x) { return x; } | 
218  |  |  | 
219  |  | inline constexpr bool IsLittleEndian() { return false; } | 
220  |  |  | 
221  |  | #endif /* ENDIAN */  | 
222  |  |  | 
223  | 0  | inline uint8_t FromHost(uint8_t x) { return x; } | 
224  | 0  | inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } | 
225  | 0  | inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } | 
226  | 0  | inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } | 
227  | 0  | inline uint8_t ToHost(uint8_t x) { return x; } | 
228  | 0  | inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } | 
229  | 0  | inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } | 
230  | 0  | inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } | 
231  |  |  | 
232  | 0  | inline int8_t FromHost(int8_t x) { return x; } | 
233  | 0  | inline int16_t FromHost(int16_t x) { | 
234  | 0  |   return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));  | 
235  | 0  | }  | 
236  | 0  | inline int32_t FromHost(int32_t x) { | 
237  | 0  |   return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));  | 
238  | 0  | }  | 
239  | 0  | inline int64_t FromHost(int64_t x) { | 
240  | 0  |   return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));  | 
241  | 0  | }  | 
242  | 0  | inline int8_t ToHost(int8_t x) { return x; } | 
243  | 0  | inline int16_t ToHost(int16_t x) { | 
244  | 0  |   return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));  | 
245  | 0  | }  | 
246  | 0  | inline int32_t ToHost(int32_t x) { | 
247  | 0  |   return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));  | 
248  | 0  | }  | 
249  | 0  | inline int64_t ToHost(int64_t x) { | 
250  | 0  |   return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));  | 
251  | 0  | }  | 
252  |  |  | 
253  |  | // Functions to do unaligned loads and stores in big-endian order.  | 
254  | 0  | inline uint16_t Load16(absl::Nonnull<const void *> p) { | 
255  | 0  |   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));  | 
256  | 0  | }  | 
257  |  |  | 
258  | 0  | inline void Store16(absl::Nonnull<void *> p, uint16_t v) { | 
259  | 0  |   ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));  | 
260  | 0  | }  | 
261  |  |  | 
262  | 0  | inline uint32_t Load32(absl::Nonnull<const void *> p) { | 
263  | 0  |   return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));  | 
264  | 0  | }  | 
265  |  |  | 
266  | 0  | inline void Store32(absl::Nonnull<void *>p, uint32_t v) { | 
267  | 0  |   ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));  | 
268  | 0  | }  | 
269  |  |  | 
270  | 0  | inline uint64_t Load64(absl::Nonnull<const void *> p) { | 
271  | 0  |   return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));  | 
272  | 0  | }  | 
273  |  |  | 
274  | 0  | inline void Store64(absl::Nonnull<void *> p, uint64_t v) { | 
275  | 0  |   ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));  | 
276  | 0  | }  | 
277  |  |  | 
278  |  | }  // namespace big_endian  | 
279  |  |  | 
280  |  | ABSL_NAMESPACE_END  | 
281  |  | }  // namespace absl  | 
282  |  |  | 
283  |  | #endif  // ABSL_BASE_INTERNAL_ENDIAN_H_  |