/src/flatbuffers/include/flatbuffers/flexbuffers.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2017 Google Inc. All rights reserved. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #ifndef FLATBUFFERS_FLEXBUFFERS_H_ |
18 | | #define FLATBUFFERS_FLEXBUFFERS_H_ |
19 | | |
20 | | #include <algorithm> |
21 | | #include <map> |
22 | | // Used to select STL variant. |
23 | | #include "flatbuffers/base.h" |
24 | | // We use the basic binary writing functions from the regular FlatBuffers. |
25 | | #include "flatbuffers/util.h" |
26 | | |
27 | | #ifdef _MSC_VER |
28 | | # include <intrin.h> |
29 | | #endif |
30 | | |
31 | | #if defined(_MSC_VER) |
32 | | # pragma warning(push) |
33 | | # pragma warning(disable : 4127) // C4127: conditional expression is constant |
34 | | #endif |
35 | | |
36 | | namespace flexbuffers { |
37 | | |
38 | | class Reference; |
39 | | class Map; |
40 | | |
41 | | // These are used in the lower 2 bits of a type field to determine the size of |
42 | | // the elements (and/or the size field) of the item pointed to (e.g. vector). |
43 | | enum BitWidth { |
44 | | BIT_WIDTH_8 = 0, |
45 | | BIT_WIDTH_16 = 1, |
46 | | BIT_WIDTH_32 = 2, |
47 | | BIT_WIDTH_64 = 3, |
48 | | }; |
49 | | |
50 | | // These are used as the upper 6 bits of a type field to indicate the actual |
51 | | // type. |
52 | | enum Type { |
53 | | FBT_NULL = 0, |
54 | | FBT_INT = 1, |
55 | | FBT_UINT = 2, |
56 | | FBT_FLOAT = 3, |
57 | | // Types above are stored inline; types below (except FBT_BOOL) store an offset. |
58 | | FBT_KEY = 4, |
59 | | FBT_STRING = 5, |
60 | | FBT_INDIRECT_INT = 6, |
61 | | FBT_INDIRECT_UINT = 7, |
62 | | FBT_INDIRECT_FLOAT = 8, |
63 | | FBT_MAP = 9, |
64 | | FBT_VECTOR = 10, // Untyped. |
65 | | FBT_VECTOR_INT = 11, // Typed any size (stores no type table). |
66 | | FBT_VECTOR_UINT = 12, |
67 | | FBT_VECTOR_FLOAT = 13, |
68 | | FBT_VECTOR_KEY = 14, |
69 | | // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead. |
70 | | // Read test.cpp/FlexBuffersDeprecatedTest() for details on why. |
71 | | FBT_VECTOR_STRING_DEPRECATED = 15, |
72 | | FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field). |
73 | | FBT_VECTOR_UINT2 = 17, |
74 | | FBT_VECTOR_FLOAT2 = 18, |
75 | | FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field). |
76 | | FBT_VECTOR_UINT3 = 20, |
77 | | FBT_VECTOR_FLOAT3 = 21, |
78 | | FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field). |
79 | | FBT_VECTOR_UINT4 = 23, |
80 | | FBT_VECTOR_FLOAT4 = 24, |
81 | | FBT_BLOB = 25, |
82 | | FBT_BOOL = 26, |
83 | | FBT_VECTOR_BOOL = |
84 | | 36, // So that ToTypedVector() (below) can convert FBT_BOOL the same way as the types above. |
85 | | |
86 | | FBT_MAX_TYPE = 37 |
87 | | }; |
88 | | |
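Together, these two enums define the packed type byte used throughout this format: a Type in the upper 6 bits and a BitWidth in the lower 2. A minimal standalone sketch of the round trip, using the PackedType() helper defined further down in this header:

#include <cassert>
#include "flatbuffers/flexbuffers.h"

int main() {
  uint8_t packed = flexbuffers::PackedType(flexbuffers::BIT_WIDTH_16,
                                           flexbuffers::FBT_INT);
  assert((packed >> 2) == flexbuffers::FBT_INT);      // type from the upper 6 bits
  assert((packed & 3) == flexbuffers::BIT_WIDTH_16);  // width from the lower 2
  assert((1 << (packed & 3)) == 2);                   // i.e. 2-byte elements
}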
89 | 1.83M | inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; } |
90 | | |
91 | 0 | inline bool IsTypedVectorElementType(Type t) { |
92 | 0 | return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL; |
93 | 0 | } |
94 | | |
95 | 0 | inline bool IsTypedVector(Type t) { |
96 | 0 | return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) || |
97 | 0 | t == FBT_VECTOR_BOOL; |
98 | 0 | } |
99 | | |
100 | 10.2k | inline bool IsFixedTypedVector(Type t) { |
101 | 10.2k | return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; |
102 | 10.2k | } |
103 | | |
104 | 0 | inline Type ToTypedVector(Type t, size_t fixed_len = 0) { |
105 | 0 | FLATBUFFERS_ASSERT(IsTypedVectorElementType(t)); |
106 | 0 | switch (fixed_len) { |
107 | 0 | case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT); |
108 | 0 | case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2); |
109 | 0 | case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3); |
110 | 0 | case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4); |
111 | 0 | default: FLATBUFFERS_ASSERT(0); return FBT_NULL; |
112 | 0 | } |
113 | 0 | } |
114 | | |
115 | 0 | inline Type ToTypedVectorElementType(Type t) { |
116 | 0 | FLATBUFFERS_ASSERT(IsTypedVector(t)); |
117 | 0 | return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT); |
118 | 0 | } |
119 | | |
120 | 10.2k | inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) { |
121 | 10.2k | FLATBUFFERS_ASSERT(IsFixedTypedVector(t)); |
122 | 10.2k | auto fixed_type = t - FBT_VECTOR_INT2; |
123 | 10.2k | *len = static_cast<uint8_t>(fixed_type / 3 + |
124 | 10.2k | 2); // 3 types each, starting from length 2. |
125 | 10.2k | return static_cast<Type>(fixed_type % 3 + FBT_INT); |
126 | 10.2k | } |
127 | | |
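Worked example of the arithmetic above: for FBT_VECTOR_FLOAT3 (21), fixed_type = 21 - 16 = 5, so the length is 5 / 3 + 2 = 3 and the element type is 5 % 3 + FBT_INT = FBT_FLOAT. A standalone check:

#include <cassert>
#include <cstdint>
#include "flatbuffers/flexbuffers.h"

int main() {
  uint8_t len = 0;
  auto elem = flexbuffers::ToFixedTypedVectorElementType(
      flexbuffers::FBT_VECTOR_FLOAT3, &len);
  assert(elem == flexbuffers::FBT_FLOAT);  // element type recovered
  assert(len == 3);                        // fixed length recovered
}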
128 | | // TODO: implement proper support for 8/16-bit floats, or decide not to |
129 | | // support them. |
130 | | typedef int16_t half; |
131 | | typedef int8_t quarter; |
132 | | |
133 | | // TODO: can we do this without conditionals using intrinsics or inline asm |
134 | | // on some platforms? Given branch prediction the method below should be |
135 | | // decently quick, but it is the most frequently executed function. |
136 | | // We could do an (unaligned) 64-bit read if we ifdef out the platforms for |
137 | | // which that doesn't work (or where we'd read into un-owned memory). |
138 | | template<typename R, typename T1, typename T2, typename T4, typename T8> |
139 | 3.88M | R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) { |
140 | 3.88M | return byte_width < 4 |
141 | 3.88M | ? (byte_width < 2 |
142 | 3.87M | ? static_cast<R>(flatbuffers::ReadScalar<T1>(data)) |
143 | 3.87M | : static_cast<R>(flatbuffers::ReadScalar<T2>(data))) |
144 | 3.88M | : (byte_width < 8 |
145 | 13.8k | ? static_cast<R>(flatbuffers::ReadScalar<T4>(data)) |
146 | 13.8k | : static_cast<R>(flatbuffers::ReadScalar<T8>(data))); |
147 | 3.88M | }
148 | | |
149 | 0 | inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) { |
150 | 0 | return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>( |
151 | 0 | data, byte_width); |
152 | 0 | } |
153 | | |
154 | 3.88M | inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) { |
155 | | // This is the "hottest" function (all offset lookups use this), so worth |
156 | | // optimizing if possible. |
157 | | // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a |
158 | | // constant, which here it isn't. Test if memcpy is still faster than |
159 | | // the conditionals in ReadSizedScalar. Can also use inline asm. |
160 | | |
161 | | // clang-format off |
162 | | #if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) |
163 | | // This is 64-bit Windows only, __movsb does not work on 32-bit Windows. |
164 | | uint64_t u = 0; |
165 | | __movsb(reinterpret_cast<uint8_t *>(&u), |
166 | | reinterpret_cast<const uint8_t *>(data), byte_width); |
167 | | return flatbuffers::EndianScalar(u); |
168 | | #else |
169 | 3.88M | return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>( |
170 | 3.88M | data, byte_width); |
171 | 3.88M | #endif |
172 | | // clang-format on |
173 | 3.88M | } |
174 | | |
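For example, reading the same bytes at different widths (a standalone sketch; FlexBuffers data is little-endian and ReadScalar converts via EndianScalar, so this holds on any host):

#include <cassert>
#include <cstdint>
#include "flatbuffers/flexbuffers.h"

int main() {
  const uint8_t buf[] = { 0x2A, 0x01, 0x00, 0x00 };
  assert(flexbuffers::ReadUInt64(buf, 2) == 0x012A);  // one 16-bit read: 298
  assert(flexbuffers::ReadUInt64(buf, 1) == 0x2A);    // only the first byte
}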
175 | 0 | inline double ReadDouble(const uint8_t *data, uint8_t byte_width) { |
176 | 0 | return ReadSizedScalar<double, quarter, half, float, double>(data, |
177 | 0 | byte_width); |
178 | 0 | } |
179 | | |
180 | 1.03M | inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) { |
181 | 1.03M | return offset - ReadUInt64(offset, byte_width); |
182 | 1.03M | } |
183 | | |
184 | 0 | template<typename T> const uint8_t *Indirect(const uint8_t *offset) { |
185 | 0 | return offset - flatbuffers::ReadScalar<T>(offset); |
186 | 0 | }
187 | | |
188 | 0 | inline BitWidth WidthU(uint64_t u) { |
189 | 0 | #define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \ |
190 | 0 | { \ |
191 | 0 | if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \ |
192 | 0 | } |
193 | 0 | FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8); |
194 | 0 | FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16); |
195 | 0 | FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32); |
196 | 0 | #undef FLATBUFFERS_GET_FIELD_BIT_WIDTH |
197 | 0 | return BIT_WIDTH_64; |
198 | 0 | } |
199 | | |
200 | 0 | inline BitWidth WidthI(int64_t i) { |
201 | 0 | auto u = static_cast<uint64_t>(i) << 1; |
202 | 0 | return WidthU(i >= 0 ? u : ~u); |
203 | 0 | } |
204 | | |
205 | 0 | inline BitWidth WidthF(double f) { |
206 | 0 | return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32 |
207 | 0 | : BIT_WIDTH_64; |
208 | 0 | } |
209 | | |
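The shift in WidthI accounts for the sign bit: a non-negative i is doubled (it needs one bit more than its magnitude), and a negative i is doubled and complemented, so e.g. 127 and -128 still fit in 8 bits while 128 and -129 need 16. A quick standalone check:

#include <cassert>
#include "flatbuffers/flexbuffers.h"

int main() {
  using namespace flexbuffers;
  assert(WidthI(127) == BIT_WIDTH_8);   // int8_t max
  assert(WidthI(128) == BIT_WIDTH_16);  // no longer fits in int8_t
  assert(WidthI(-128) == BIT_WIDTH_8);  // int8_t min
  assert(WidthI(-129) == BIT_WIDTH_16);
}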
210 | | // Base class of all types below. |
211 | | // Points into the data buffer and allows access to one type. |
212 | | class Object { |
213 | | public: |
214 | | Object(const uint8_t *data, uint8_t byte_width) |
215 | 1.81M | : data_(data), byte_width_(byte_width) {} |
216 | | |
217 | | protected: |
218 | | const uint8_t *data_; |
219 | | uint8_t byte_width_; |
220 | | }; |
221 | | |
222 | | // Object that has a size, obtained either from a size prefix or elsewhere. |
223 | | class Sized : public Object { |
224 | | public: |
225 | | // Size prefix. |
226 | | Sized(const uint8_t *data, uint8_t byte_width) |
227 | 1.81M | : Object(data, byte_width), size_(read_size()) {} |
228 | | // Manual size. |
229 | | Sized(const uint8_t *data, uint8_t byte_width, size_t sz) |
230 | 0 | : Object(data, byte_width), size_(sz) {} |
231 | 2.74M | size_t size() const { return size_; } |
232 | | // Access size stored in `byte_width_` bytes before data_ pointer. |
233 | 1.81M | size_t read_size() const { |
234 | 1.81M | return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_)); |
235 | 1.81M | } |
236 | | |
237 | | protected: |
238 | | size_t size_; |
239 | | }; |
240 | | |
241 | | class String : public Sized { |
242 | | public: |
243 | | // Size prefix. |
244 | 3.56k | String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} |
245 | | // Manual size. |
246 | | String(const uint8_t *data, uint8_t byte_width, size_t sz) |
247 | 0 | : Sized(data, byte_width, sz) {} |
248 | | |
249 | 0 | size_t length() const { return size(); } |
250 | 3.56k | const char *c_str() const { return reinterpret_cast<const char *>(data_); } |
251 | 0 | std::string str() const { return std::string(c_str(), size()); } |
252 | | |
253 | 0 | static String EmptyString() { |
254 | 0 | static const char *empty_string = ""; |
255 | 0 | return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0); |
256 | 0 | } |
257 | 0 | bool IsTheEmptyString() const { return data_ == EmptyString().data_; } |
258 | | }; |
259 | | |
260 | | class Blob : public Sized { |
261 | | public: |
262 | | Blob(const uint8_t *data_buf, uint8_t byte_width) |
263 | 0 | : Sized(data_buf, byte_width) {} |
264 | | |
265 | 0 | static Blob EmptyBlob() { |
266 | 0 | static const uint8_t empty_blob[] = { 0 /*len*/ }; |
267 | 0 | return Blob(empty_blob + 1, 1); |
268 | 0 | } |
269 | 0 | bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; } |
270 | 0 | const uint8_t *data() const { return data_; } |
271 | | }; |
272 | | |
273 | | class Vector : public Sized { |
274 | | public: |
275 | 895k | Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} |
276 | | |
277 | | Reference operator[](size_t i) const; |
278 | | |
279 | 0 | static Vector EmptyVector() { |
280 | 0 | static const uint8_t empty_vector[] = { 0 /*len*/ }; |
281 | 0 | return Vector(empty_vector + 1, 1); |
282 | 0 | } |
283 | 0 | bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; } |
284 | | }; |
285 | | |
286 | | class TypedVector : public Sized { |
287 | | public: |
288 | | TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type) |
289 | 5.10k | : Sized(data, byte_width), type_(element_type) {} |
290 | | |
291 | | Reference operator[](size_t i) const; |
292 | | |
293 | 0 | static TypedVector EmptyTypedVector() { |
294 | 0 | static const uint8_t empty_typed_vector[] = { 0 /*len*/ }; |
295 | 0 | return TypedVector(empty_typed_vector + 1, 1, FBT_INT); |
296 | 0 | } |
297 | 0 | bool IsTheEmptyVector() const { |
298 | 0 | return data_ == TypedVector::EmptyTypedVector().data_; |
299 | 0 | } |
300 | | |
301 | 0 | Type ElementType() { return type_; } |
302 | | |
303 | | friend Reference; |
304 | | |
305 | | private: |
306 | | Type type_; |
307 | | |
308 | | friend Map; |
309 | | }; |
310 | | |
311 | | class FixedTypedVector : public Object { |
312 | | public: |
313 | | FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, |
314 | | uint8_t len) |
315 | 0 | : Object(data, byte_width), type_(element_type), len_(len) {} |
316 | | |
317 | | Reference operator[](size_t i) const; |
318 | | |
319 | 0 | static FixedTypedVector EmptyFixedTypedVector() { |
320 | 0 | static const uint8_t fixed_empty_vector[] = { 0 /* unused */ }; |
321 | 0 | return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0); |
322 | 0 | } |
323 | 0 | bool IsTheEmptyFixedTypedVector() const { |
324 | 0 | return data_ == FixedTypedVector::EmptyFixedTypedVector().data_; |
325 | 0 | } |
326 | | |
327 | 0 | Type ElementType() const { return type_; } |
328 | 0 | uint8_t size() const { return len_; } |
329 | | |
330 | | private: |
331 | | Type type_; |
332 | | uint8_t len_; |
333 | | }; |
334 | | |
335 | | class Map : public Vector { |
336 | | public: |
337 | 0 | Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {} |
338 | | |
339 | | Reference operator[](const char *key) const; |
340 | | Reference operator[](const std::string &key) const; |
341 | | |
342 | 0 | Vector Values() const { return Vector(data_, byte_width_); } |
343 | | |
344 | 0 | TypedVector Keys() const { |
345 | 0 | const size_t num_prefixed_fields = 3; |
346 | 0 | auto keys_offset = data_ - byte_width_ * num_prefixed_fields; |
347 | 0 | return TypedVector(Indirect(keys_offset, byte_width_), |
348 | 0 | static_cast<uint8_t>( |
349 | 0 | ReadUInt64(keys_offset + byte_width_, byte_width_)), |
350 | 0 | FBT_KEY); |
351 | 0 | } |
352 | | |
353 | 0 | static Map EmptyMap() { |
354 | 0 | static const uint8_t empty_map[] = { |
355 | 0 | 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/ |
356 | 0 | }; |
357 | 0 | return Map(empty_map + 4, 1); |
358 | 0 | } |
359 | | |
360 | 0 | bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; } |
361 | | }; |
362 | | |
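Putting Keys(), Values() and Sized::read_size() together, this is the buffer layout Map walks (a sketch derived from the code above; each prefixed field is byte_width_ bytes wide):

// [offset to keys vector]   <- data_ - 3 * byte_width_
// [keys vector byte width]  <- data_ - 2 * byte_width_
// [length]                  <- data_ - 1 * byte_width_  (read by Sized)
// [value 0] ... [value len-1]              <- data_, byte_width_ bytes each
// [packed type 0] ... [packed type len-1]  <- one byte each
//
// The keys vector itself is a typed vector of FBT_KEY stored earlier in the
// buffer and sorted, so Map::operator[] (below) can binary-search it.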
363 | | inline void IndentString(std::string &s, int indent, |
364 | 0 | const char *indent_string) { |
365 | 0 | for (int i = 0; i < indent; i++) s += indent_string; |
366 | 0 | } |
367 | | |
368 | | template<typename T> |
369 | | void AppendToString(std::string &s, T &&v, bool keys_quoted, bool indented, |
370 | | int cur_indent, const char *indent_string, |
371 | 0 | bool natural_utf8) { |
372 | 0 | s += "["; |
373 | 0 | s += indented ? "\n" : " "; |
374 | 0 | for (size_t i = 0; i < v.size(); i++) { |
375 | 0 | if (i) { |
376 | 0 | s += ","; |
377 | 0 | s += indented ? "\n" : " "; |
378 | 0 | } |
379 | 0 | if (indented) IndentString(s, cur_indent, indent_string); |
380 | 0 | v[i].ToString(true, keys_quoted, s, indented, cur_indent, |
381 | 0 | indent_string, natural_utf8); |
382 | 0 | } |
383 | 0 | if (indented) { |
384 | 0 | s += "\n"; |
385 | 0 | IndentString(s, cur_indent - 1, indent_string); |
386 | 0 | } else { |
387 | 0 | s += " "; |
388 | 0 | } |
389 | 0 | s += "]"; |
390 | 0 | }
391 | | |
392 | | template<typename T> |
393 | | void AppendToString(std::string &s, T &&v, bool keys_quoted) { |
394 | | AppendToString(s, v, keys_quoted, false, 0, "", false); |
395 | | } |
396 | | |
397 | | |
398 | | class Reference { |
399 | | public: |
400 | | Reference() |
401 | 0 | : data_(nullptr), parent_width_(0), byte_width_(0), type_(FBT_NULL) {} |
402 | | |
403 | | Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, |
404 | | Type type) |
405 | 107k | : data_(data), |
406 | 107k | parent_width_(parent_width), |
407 | 107k | byte_width_(byte_width), |
408 | 107k | type_(type) {} |
409 | | |
410 | | Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type) |
411 | 1.72M | : data_(data), |
412 | 1.72M | parent_width_(parent_width), |
413 | 1.72M | byte_width_(static_cast<uint8_t>(1 << (packed_type & 3))), |
414 | 1.72M | type_(static_cast<Type>(packed_type >> 2)) {} |
415 | | |
416 | 0 | Type GetType() const { return type_; } |
417 | | |
418 | 0 | bool IsNull() const { return type_ == FBT_NULL; } |
419 | 0 | bool IsBool() const { return type_ == FBT_BOOL; } |
420 | 0 | bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; } |
421 | 0 | bool IsUInt() const { |
422 | 0 | return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; |
423 | 0 | } |
424 | 0 | bool IsIntOrUint() const { return IsInt() || IsUInt(); } |
425 | 0 | bool IsFloat() const { |
426 | 0 | return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; |
427 | 0 | } |
428 | 0 | bool IsNumeric() const { return IsIntOrUint() || IsFloat(); } |
429 | 0 | bool IsString() const { return type_ == FBT_STRING; } |
430 | 0 | bool IsKey() const { return type_ == FBT_KEY; } |
431 | 0 | bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; } |
432 | 0 | bool IsUntypedVector() const { return type_ == FBT_VECTOR; } |
433 | 0 | bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); } |
434 | 0 | bool IsFixedTypedVector() const { |
435 | 0 | return flexbuffers::IsFixedTypedVector(type_); |
436 | 0 | } |
437 | 0 | bool IsAnyVector() const { |
438 | 0 | return (IsTypedVector() || IsFixedTypedVector() || IsVector()); |
439 | 0 | } |
440 | 0 | bool IsMap() const { return type_ == FBT_MAP; } |
441 | 0 | bool IsBlob() const { return type_ == FBT_BLOB; } |
442 | 0 | bool AsBool() const { |
443 | 0 | return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) |
444 | 0 | : AsUInt64()) != 0; |
445 | 0 | } |
446 | | |
447 | | // Reads any type as an int64_t. Never fails, doing the most sensible |
448 | | // conversion: floats are truncated, strings are parsed as numbers where |
449 | | // possible, and vectors/maps return their size. Returns 0 if all else fails. |
450 | 0 | int64_t AsInt64() const { |
451 | 0 | if (type_ == FBT_INT) { |
452 | 0 | // A fast path for the common case. |
453 | 0 | return ReadInt64(data_, parent_width_); |
454 | 0 | } else |
455 | 0 | switch (type_) { |
456 | 0 | case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); |
457 | 0 | case FBT_UINT: return ReadUInt64(data_, parent_width_); |
458 | 0 | case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); |
459 | 0 | case FBT_FLOAT: |
460 | 0 | return static_cast<int64_t>(ReadDouble(data_, parent_width_)); |
461 | 0 | case FBT_INDIRECT_FLOAT: |
462 | 0 | return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_)); |
463 | 0 | case FBT_NULL: return 0; |
464 | 0 | case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str()); |
465 | 0 | case FBT_VECTOR: return static_cast<int64_t>(AsVector().size()); |
466 | 0 | case FBT_BOOL: return ReadInt64(data_, parent_width_); |
467 | 0 | default: |
468 | 0 | // Convert other things to int. |
469 | 0 | return 0; |
470 | 0 | } |
471 | 0 | } |
472 | | |
473 | | // TODO: could specialize these to not use AsInt64() if that saves |
474 | | // extension ops in generated code, and use a faster op than ReadInt64. |
475 | 0 | int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); } |
476 | 0 | int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); } |
477 | 0 | int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); } |
478 | | |
479 | 0 | uint64_t AsUInt64() const { |
480 | 0 | if (type_ == FBT_UINT) { |
481 | 0 | // A fast path for the common case. |
482 | 0 | return ReadUInt64(data_, parent_width_); |
483 | 0 | } else |
484 | 0 | switch (type_) { |
485 | 0 | case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); |
486 | 0 | case FBT_INT: return ReadInt64(data_, parent_width_); |
487 | 0 | case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); |
488 | 0 | case FBT_FLOAT: |
489 | 0 | return static_cast<uint64_t>(ReadDouble(data_, parent_width_)); |
490 | 0 | case FBT_INDIRECT_FLOAT: |
491 | 0 | return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_)); |
492 | 0 | case FBT_NULL: return 0; |
493 | 0 | case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str()); |
494 | 0 | case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size()); |
495 | 0 | case FBT_BOOL: return ReadUInt64(data_, parent_width_); |
496 | 0 | default: |
497 | 0 | // Convert other things to uint. |
498 | 0 | return 0; |
499 | 0 | } |
500 | 0 | } |
501 | | |
502 | 0 | uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); } |
503 | 0 | uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); } |
504 | 0 | uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); } |
505 | | |
506 | 0 | double AsDouble() const { |
507 | 0 | if (type_ == FBT_FLOAT) { |
508 | 0 | // A fast path for the common case. |
509 | 0 | return ReadDouble(data_, parent_width_); |
510 | 0 | } else |
511 | 0 | switch (type_) { |
512 | 0 | case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_); |
513 | 0 | case FBT_INT: |
514 | 0 | return static_cast<double>(ReadInt64(data_, parent_width_)); |
515 | 0 | case FBT_UINT: |
516 | 0 | return static_cast<double>(ReadUInt64(data_, parent_width_)); |
517 | 0 | case FBT_INDIRECT_INT: |
518 | 0 | return static_cast<double>(ReadInt64(Indirect(), byte_width_)); |
519 | 0 | case FBT_INDIRECT_UINT: |
520 | 0 | return static_cast<double>(ReadUInt64(Indirect(), byte_width_)); |
521 | 0 | case FBT_NULL: return 0.0; |
522 | 0 | case FBT_STRING: { |
523 | 0 | double d; |
524 | 0 | flatbuffers::StringToNumber(AsString().c_str(), &d); |
525 | 0 | return d; |
526 | 0 | } |
527 | 0 | case FBT_VECTOR: return static_cast<double>(AsVector().size()); |
528 | 0 | case FBT_BOOL: |
529 | 0 | return static_cast<double>(ReadUInt64(data_, parent_width_)); |
530 | 0 | default: |
531 | 0 | // Convert strings and other things to float. |
532 | 0 | return 0; |
533 | 0 | } |
534 | 0 | } |
535 | | |
536 | 0 | float AsFloat() const { return static_cast<float>(AsDouble()); } |
537 | | |
538 | 0 | const char *AsKey() const { |
539 | 0 | if (type_ == FBT_KEY || type_ == FBT_STRING) { |
540 | 0 | return reinterpret_cast<const char *>(Indirect()); |
541 | 0 | } else { |
542 | 0 | return ""; |
543 | 0 | } |
544 | 0 | } |
545 | | |
546 | | // This function returns the empty string if you try to read something that |
547 | | // is not a string or key. |
548 | 0 | String AsString() const { |
549 | 0 | if (type_ == FBT_STRING) { |
550 | 0 | return String(Indirect(), byte_width_); |
551 | 0 | } else if (type_ == FBT_KEY) { |
552 | 0 | auto key = Indirect(); |
553 | 0 | return String(key, byte_width_, |
554 | 0 | strlen(reinterpret_cast<const char *>(key))); |
555 | 0 | } else { |
556 | 0 | return String::EmptyString(); |
557 | 0 | } |
558 | 0 | } |
559 | | |
560 | | // Unlike AsString(), this will convert any type to a std::string. |
561 | 0 | std::string ToString() const { |
562 | 0 | std::string s; |
563 | 0 | ToString(false, false, s); |
564 | 0 | return s; |
565 | 0 | } |
566 | | |
567 | | // Convert any type to a JSON-like string. strings_quoted determines if |
568 | | // string values at the top level receive "" quotes (inside other values |
569 | | // they always do). keys_quoted determines if keys are quoted, at any level. |
570 | 0 | void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const { |
571 | 0 | ToString(strings_quoted, keys_quoted, s, false, 0, "", false); |
572 | 0 | } |
573 | | |
574 | | // This version additionally allows you to specify if you want indentation. |
575 | | void ToString(bool strings_quoted, bool keys_quoted, std::string &s, |
576 | | bool indented, int cur_indent, const char *indent_string, |
577 | 0 | bool natural_utf8 = false) const { |
578 | 0 | if (type_ == FBT_STRING) { |
579 | 0 | String str(Indirect(), byte_width_); |
580 | 0 | if (strings_quoted) { |
581 | 0 | flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, natural_utf8); |
582 | 0 | } else { |
583 | 0 | s.append(str.c_str(), str.length()); |
584 | 0 | } |
585 | 0 | } else if (IsKey()) { |
586 | 0 | auto str = AsKey(); |
587 | 0 | if (keys_quoted) { |
588 | 0 | flatbuffers::EscapeString(str, strlen(str), &s, true, natural_utf8); |
589 | 0 | } else { |
590 | 0 | s += str; |
591 | 0 | } |
592 | 0 | } else if (IsInt()) { |
593 | 0 | s += flatbuffers::NumToString(AsInt64()); |
594 | 0 | } else if (IsUInt()) { |
595 | 0 | s += flatbuffers::NumToString(AsUInt64()); |
596 | 0 | } else if (IsFloat()) { |
597 | 0 | s += flatbuffers::NumToString(AsDouble()); |
598 | 0 | } else if (IsNull()) { |
599 | 0 | s += "null"; |
600 | 0 | } else if (IsBool()) { |
601 | 0 | s += AsBool() ? "true" : "false"; |
602 | 0 | } else if (IsMap()) { |
603 | 0 | s += "{"; |
604 | 0 | s += indented ? "\n" : " "; |
605 | 0 | auto m = AsMap(); |
606 | 0 | auto keys = m.Keys(); |
607 | 0 | auto vals = m.Values(); |
608 | 0 | for (size_t i = 0; i < keys.size(); i++) { |
609 | 0 | bool kq = keys_quoted; |
610 | 0 | if (!kq) { |
611 | 0 | // FlexBuffers keys may contain arbitrary characters, so we only allow |
612 | 0 | // them unquoted if the key looks like an "identifier": |
613 | 0 | const char *p = keys[i].AsKey(); |
614 | 0 | if (!flatbuffers::is_alpha(*p) && *p != '_') { |
615 | 0 | kq = true; |
616 | 0 | } else { |
617 | 0 | while (*++p) { |
618 | 0 | if (!flatbuffers::is_alnum(*p) && *p != '_') { |
619 | 0 | kq = true; |
620 | 0 | break; |
621 | 0 | } |
622 | 0 | } |
623 | 0 | } |
624 | 0 | } |
625 | 0 | if (indented) IndentString(s, cur_indent + 1, indent_string); |
626 | 0 | keys[i].ToString(true, kq, s); |
627 | 0 | s += ": "; |
628 | 0 | vals[i].ToString(true, keys_quoted, s, indented, cur_indent + 1, indent_string, |
629 | 0 | natural_utf8); |
630 | 0 | if (i < keys.size() - 1) { |
631 | 0 | s += ","; |
632 | 0 | if (!indented) s += " "; |
633 | 0 | } |
634 | 0 | if (indented) s += "\n"; |
635 | 0 | } |
636 | 0 | if (!indented) s += " "; |
637 | 0 | if (indented) IndentString(s, cur_indent, indent_string); |
638 | 0 | s += "}"; |
639 | 0 | } else if (IsVector()) { |
640 | 0 | AppendToString<Vector>(s, AsVector(), keys_quoted, indented, |
641 | 0 | cur_indent + 1, indent_string, natural_utf8); |
642 | 0 | } else if (IsTypedVector()) { |
643 | 0 | AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted, indented, |
644 | 0 | cur_indent + 1, indent_string, |
645 | 0 | natural_utf8); |
646 | 0 | } else if (IsFixedTypedVector()) { |
647 | 0 | AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted, |
648 | 0 | indented, cur_indent + 1, indent_string, |
649 | 0 | natural_utf8); |
650 | 0 | } else if (IsBlob()) { |
651 | 0 | auto blob = AsBlob(); |
652 | 0 | flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()), |
653 | 0 | blob.size(), &s, true, false); |
654 | 0 | } else { |
655 | 0 | s += "(?)"; |
656 | 0 | } |
657 | 0 | } |
658 | | |
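A usage sketch of the text conversion (Builder and GetRoot appear elsewhere in this header; Finish() is defined further down). Assuming the root is the map { a: [ 1, 2 ] }, the spacing shown follows the code above:

#include <string>
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder slb;
  slb.Map([&]() {
    slb.Vector("a", [&]() {
      slb.Int(1);
      slb.Int(2);
    });
  });
  slb.Finish();
  std::string s;
  flexbuffers::GetRoot(slb.GetBuffer()).ToString(/*strings_quoted=*/true,
                                                 /*keys_quoted=*/true, s);
  // s should now be: { "a": [ 1, 2 ] }
}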
659 | | // This function returns the empty blob if you try to read something that |
660 | | // is not a blob. Strings can be viewed as blobs too. |
661 | 0 | Blob AsBlob() const { |
662 | 0 | if (type_ == FBT_BLOB || type_ == FBT_STRING) { |
663 | 0 | return Blob(Indirect(), byte_width_); |
664 | 0 | } else { |
665 | 0 | return Blob::EmptyBlob(); |
666 | 0 | } |
667 | 0 | } |
668 | | |
669 | | // This function returns the empty vector if you try to read something |
670 | | // that is not a vector. Maps can be viewed as vectors too. |
671 | 0 | Vector AsVector() const { |
672 | 0 | if (type_ == FBT_VECTOR || type_ == FBT_MAP) { |
673 | 0 | return Vector(Indirect(), byte_width_); |
674 | 0 | } else { |
675 | 0 | return Vector::EmptyVector(); |
676 | 0 | } |
677 | 0 | } |
678 | | |
679 | 0 | TypedVector AsTypedVector() const { |
680 | 0 | if (IsTypedVector()) { |
681 | 0 | auto tv = |
682 | 0 | TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_)); |
683 | 0 | if (tv.type_ == FBT_STRING) { |
684 | 0 | // These can't be accessed as strings, since we don't know the bit-width |
685 | 0 | // of the size field; see the declaration of |
686 | 0 | // FBT_VECTOR_STRING_DEPRECATED above for details. |
687 | 0 | // We change the type here to be keys, which are a subtype of strings, |
688 | 0 | // and will ignore the size field. This will truncate strings with |
689 | 0 | // embedded nulls. |
690 | 0 | tv.type_ = FBT_KEY; |
691 | 0 | } |
692 | 0 | return tv; |
693 | 0 | } else { |
694 | 0 | return TypedVector::EmptyTypedVector(); |
695 | 0 | } |
696 | 0 | } |
697 | | |
698 | 0 | FixedTypedVector AsFixedTypedVector() const { |
699 | 0 | if (IsFixedTypedVector()) { |
700 | 0 | uint8_t len = 0; |
701 | 0 | auto vtype = ToFixedTypedVectorElementType(type_, &len); |
702 | 0 | return FixedTypedVector(Indirect(), byte_width_, vtype, len); |
703 | 0 | } else { |
704 | 0 | return FixedTypedVector::EmptyFixedTypedVector(); |
705 | 0 | } |
706 | 0 | } |
707 | | |
708 | 0 | Map AsMap() const { |
709 | 0 | if (type_ == FBT_MAP) { |
710 | 0 | return Map(Indirect(), byte_width_); |
711 | 0 | } else { |
712 | 0 | return Map::EmptyMap(); |
713 | 0 | } |
714 | 0 | } |
715 | | |
716 | | template<typename T> T As() const; |
717 | | |
718 | | // Experimental: Mutation functions. |
719 | | // These allow scalars in an already created buffer to be updated in-place. |
720 | | // Since by default scalars are stored in the smallest possible space, |
721 | | // the new value may not fit, in which case these functions return false. |
722 | | // To avoid this, you can construct the values you intend to mutate using |
723 | | // Builder::ForceMinimumBitWidth. |
724 | 0 | bool MutateInt(int64_t i) { |
725 | 0 | if (type_ == FBT_INT) { |
726 | 0 | return Mutate(data_, i, parent_width_, WidthI(i)); |
727 | 0 | } else if (type_ == FBT_INDIRECT_INT) { |
728 | 0 | return Mutate(Indirect(), i, byte_width_, WidthI(i)); |
729 | 0 | } else if (type_ == FBT_UINT) { |
730 | 0 | auto u = static_cast<uint64_t>(i); |
731 | 0 | return Mutate(data_, u, parent_width_, WidthU(u)); |
732 | 0 | } else if (type_ == FBT_INDIRECT_UINT) { |
733 | 0 | auto u = static_cast<uint64_t>(i); |
734 | 0 | return Mutate(Indirect(), u, byte_width_, WidthU(u)); |
735 | 0 | } else { |
736 | 0 | return false; |
737 | 0 | } |
738 | 0 | } |
739 | | |
740 | 0 | bool MutateBool(bool b) { |
741 | 0 | return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8); |
742 | 0 | } |
743 | | |
744 | 0 | bool MutateUInt(uint64_t u) { |
745 | 0 | if (type_ == FBT_UINT) { |
746 | 0 | return Mutate(data_, u, parent_width_, WidthU(u)); |
747 | 0 | } else if (type_ == FBT_INDIRECT_UINT) { |
748 | 0 | return Mutate(Indirect(), u, byte_width_, WidthU(u)); |
749 | 0 | } else if (type_ == FBT_INT) { |
750 | 0 | auto i = static_cast<int64_t>(u); |
751 | 0 | return Mutate(data_, i, parent_width_, WidthI(i)); |
752 | 0 | } else if (type_ == FBT_INDIRECT_INT) { |
753 | 0 | auto i = static_cast<int64_t>(u); |
754 | 0 | return Mutate(Indirect(), i, byte_width_, WidthI(i)); |
755 | 0 | } else { |
756 | 0 | return false; |
757 | 0 | } |
758 | 0 | } |
759 | | |
760 | 0 | bool MutateFloat(float f) { |
761 | 0 | if (type_ == FBT_FLOAT) { |
762 | 0 | return MutateF(data_, f, parent_width_, BIT_WIDTH_32); |
763 | 0 | } else if (type_ == FBT_INDIRECT_FLOAT) { |
764 | 0 | return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32); |
765 | 0 | } else { |
766 | 0 | return false; |
767 | 0 | } |
768 | 0 | } |
769 | | |
770 | 0 | bool MutateFloat(double d) { |
771 | 0 | if (type_ == FBT_FLOAT) { |
772 | 0 | return MutateF(data_, d, parent_width_, WidthF(d)); |
773 | 0 | } else if (type_ == FBT_INDIRECT_FLOAT) { |
774 | 0 | return MutateF(Indirect(), d, byte_width_, WidthF(d)); |
775 | 0 | } else { |
776 | 0 | return false; |
777 | 0 | } |
778 | 0 | } |
779 | | |
780 | 0 | bool MutateString(const char *str, size_t len) { |
781 | 0 | auto s = AsString(); |
782 | 0 | if (s.IsTheEmptyString()) return false; |
783 | 0 | // This is very strict: we could allow shorter strings, but that would |
784 | 0 | // leave garbage in the buffer. |
785 | 0 | if (s.length() != len) return false; |
786 | 0 | memcpy(const_cast<char *>(s.c_str()), str, len); |
787 | 0 | return true; |
788 | 0 | } |
789 | 0 | bool MutateString(const char *str) { return MutateString(str, strlen(str)); } |
790 | 0 | bool MutateString(const std::string &str) { |
791 | 0 | return MutateString(str.data(), str.length()); |
792 | 0 | } |
793 | | |
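A sketch of those rules in action: a scalar stored in 16 bits accepts any replacement that also fits in 16 bits and rejects wider ones (Finish() is defined further down in this header):

#include <cassert>
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder slb;
  slb.Int(1000);  // 1000 needs 16 bits, so it is stored in 2 bytes
  slb.Finish();
  auto root = flexbuffers::GetRoot(slb.GetBuffer());
  assert(root.MutateInt(2000));     // still fits in 16 bits
  assert(!root.MutateInt(100000));  // would need 32 bits: rejected
  assert(root.AsInt64() == 2000);   // the buffer was updated in place
}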
794 | | private: |
795 | 1.03M | const uint8_t *Indirect() const { |
796 | 1.03M | return flexbuffers::Indirect(data_, parent_width_); |
797 | 1.03M | } |
798 | | |
799 | | template<typename T> |
800 | | bool Mutate(const uint8_t *dest, T t, size_t byte_width, |
801 | 0 | BitWidth value_width) { |
802 | 0 | auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <= |
803 | 0 | byte_width; |
804 | 0 | if (fits) { |
805 | 0 | t = flatbuffers::EndianScalar(t); |
806 | 0 | memcpy(const_cast<uint8_t *>(dest), &t, byte_width); |
807 | 0 | } |
808 | 0 | return fits; |
809 | 0 | }
810 | | |
811 | | template<typename T> |
812 | | bool MutateF(const uint8_t *dest, T t, size_t byte_width, |
813 | 0 | BitWidth value_width) { |
814 | 0 | if (byte_width == sizeof(double)) |
815 | 0 | return Mutate(dest, static_cast<double>(t), byte_width, value_width); |
816 | 0 | if (byte_width == sizeof(float)) |
817 | 0 | return Mutate(dest, static_cast<float>(t), byte_width, value_width); |
818 | 0 | FLATBUFFERS_ASSERT(false); |
819 | 0 | return false; |
820 | 0 | }
821 | | |
822 | | friend class Verifier; |
823 | | |
824 | | const uint8_t *data_; |
825 | | uint8_t parent_width_; |
826 | | uint8_t byte_width_; |
827 | | Type type_; |
828 | | }; |
829 | | |
830 | | // Template specialization for As(). |
831 | 0 | template<> inline bool Reference::As<bool>() const { return AsBool(); } |
832 | | |
833 | 0 | template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); } |
834 | 0 | template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); } |
835 | 0 | template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); } |
836 | 0 | template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); } |
837 | | |
838 | 0 | template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); } |
839 | 0 | template<> inline uint16_t Reference::As<uint16_t>() const { |
840 | 0 | return AsUInt16(); |
841 | 0 | } |
842 | 0 | template<> inline uint32_t Reference::As<uint32_t>() const { |
843 | 0 | return AsUInt32(); |
844 | 0 | } |
845 | 0 | template<> inline uint64_t Reference::As<uint64_t>() const { |
846 | 0 | return AsUInt64(); |
847 | 0 | } |
848 | | |
849 | 0 | template<> inline double Reference::As<double>() const { return AsDouble(); } |
850 | 0 | template<> inline float Reference::As<float>() const { return AsFloat(); } |
851 | | |
852 | 0 | template<> inline String Reference::As<String>() const { return AsString(); } |
853 | 0 | template<> inline std::string Reference::As<std::string>() const { |
854 | 0 | return AsString().str(); |
855 | 0 | } |
856 | | |
857 | 0 | template<> inline Blob Reference::As<Blob>() const { return AsBlob(); } |
858 | 0 | template<> inline Vector Reference::As<Vector>() const { return AsVector(); } |
859 | 0 | template<> inline TypedVector Reference::As<TypedVector>() const { |
860 | 0 | return AsTypedVector(); |
861 | 0 | } |
862 | 0 | template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const { |
863 | 0 | return AsFixedTypedVector(); |
864 | 0 | } |
865 | 0 | template<> inline Map Reference::As<Map>() const { return AsMap(); } |
866 | | |
867 | 0 | inline uint8_t PackedType(BitWidth bit_width, Type type) { |
868 | 0 | return static_cast<uint8_t>(bit_width | (type << 2)); |
869 | 0 | } |
870 | | |
871 | 0 | inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); } |
872 | | |
873 | | // Vector accessors. |
874 | | // Note: if you try to access out of bounds, you get a Null value back |
875 | | // instead. Normally this would be an assert, but since this is "dynamically |
876 | | // typed" data, you may not want that (someone sends you a 2d vector and you |
877 | | // wanted 3d). |
878 | | // The Null converts seamlessly into a default value for any other type. |
879 | | // TODO(wvo): Could introduce an #ifdef that makes this into an assert? |
880 | 1.71M | inline Reference Vector::operator[](size_t i) const { |
881 | 1.71M | auto len = size(); |
882 | 1.71M | if (i >= len) return Reference(nullptr, 1, NullPackedType()); |
883 | 1.71M | auto packed_type = (data_ + len * byte_width_)[i]; |
884 | 1.71M | auto elem = data_ + i * byte_width_; |
885 | 1.71M | return Reference(elem, byte_width_, packed_type); |
886 | 1.71M | } |
887 | | |
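The indexing above reflects the untyped vector layout: len elements of byte_width_ bytes each, followed by len one-byte packed types, so element i's type byte lives at data_ + len * byte_width_ + i. As a sketch:

// [length]                   <- data_ - byte_width_  (read by Sized)
// [elem 0] ... [elem len-1]  <- data_, byte_width_ bytes each
// [type 0] ... [type len-1]  <- one packed type byte per element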
888 | 106k | inline Reference TypedVector::operator[](size_t i) const { |
889 | 106k | auto len = size(); |
890 | 106k | if (i >= len) return Reference(nullptr, 1, NullPackedType()); |
891 | 106k | auto elem = data_ + i * byte_width_; |
892 | 106k | return Reference(elem, byte_width_, 1, type_); |
893 | 106k | } |
894 | | |
895 | 0 | inline Reference FixedTypedVector::operator[](size_t i) const { |
896 | 0 | if (i >= len_) return Reference(nullptr, 1, NullPackedType()); |
897 | 0 | auto elem = data_ + i * byte_width_; |
898 | 0 | return Reference(elem, byte_width_, 1, type_); |
899 | 0 | } |
900 | | |
901 | 0 | template<typename T> int KeyCompare(const void *key, const void *elem) { |
902 | 0 | auto str_elem = reinterpret_cast<const char *>( |
903 | 0 | Indirect<T>(reinterpret_cast<const uint8_t *>(elem))); |
904 | 0 | auto skey = reinterpret_cast<const char *>(key); |
905 | 0 | return strcmp(skey, str_elem); |
906 | 0 | }
907 | | |
908 | 0 | inline Reference Map::operator[](const char *key) const { |
909 | 0 | auto keys = Keys(); |
910 | 0 | // We can't pass keys.byte_width_ to the comparison function, so we have |
911 | 0 | // to pick the right one ahead of time. |
912 | 0 | int (*comp)(const void *, const void *) = nullptr; |
913 | 0 | switch (keys.byte_width_) { |
914 | 0 | case 1: comp = KeyCompare<uint8_t>; break; |
915 | 0 | case 2: comp = KeyCompare<uint16_t>; break; |
916 | 0 | case 4: comp = KeyCompare<uint32_t>; break; |
917 | 0 | case 8: comp = KeyCompare<uint64_t>; break; |
918 | 0 | default: FLATBUFFERS_ASSERT(false); return Reference(); |
919 | 0 | } |
920 | 0 | auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp); |
921 | 0 | if (!res) return Reference(nullptr, 1, NullPackedType()); |
922 | 0 | auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_; |
923 | 0 | return (*static_cast<const Vector *>(this))[i]; |
924 | 0 | } |
925 | | |
926 | 0 | inline Reference Map::operator[](const std::string &key) const { |
927 | 0 | return (*this)[key.c_str()]; |
928 | 0 | } |
929 | | |
930 | 0 | inline Reference GetRoot(const uint8_t *buffer, size_t size) { |
931 | 0 | // See Finish() below for the serialization counterpart of this. |
932 | 0 | // The root starts at the end of the buffer, so we parse backwards from there. |
933 | 0 | auto end = buffer + size; |
934 | 0 | auto byte_width = *--end; |
935 | 0 | auto packed_type = *--end; |
936 | 0 | end -= byte_width; // The root data item. |
937 | 0 | return Reference(end, byte_width, packed_type); |
938 | 0 | } |
939 | | |
940 | 0 | inline Reference GetRoot(const std::vector<uint8_t> &buffer) { |
941 | 0 | return GetRoot(buffer.data(), buffer.size()); |
942 | 0 | } |
943 | | |
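A minimal end-to-end sketch of building and then reading a buffer (the Builder class follows below; Finish(), referenced in the comment above, appends the root value, its packed type and its byte width at the very end):

#include <cassert>
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder slb;
  slb.Map([&]() {
    slb.Int("x", 42);
    slb.String("name", "hi");
  });
  slb.Finish();
  auto map = flexbuffers::GetRoot(slb.GetBuffer()).AsMap();
  assert(map["x"].AsInt32() == 42);
  assert(map["name"].AsString().str() == "hi");
}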
944 | | // Flags that configure how the Builder behaves. |
945 | | // The "Share" flags determine if the Builder automatically tries to pool |
946 | | // this type. Pooling can reduce the size of serialized data if there are |
947 | | // multiple maps of the same kind, at the expense of slightly slower |
948 | | // serialization (the cost of lookups) and more memory use (std::set). |
949 | | // By default this is on for keys, but off for strings. |
950 | | // Turn keys off if you have e.g. only one map. |
951 | | // Turn strings on if you expect many non-unique string values. |
952 | | // Additionally, sharing key vectors can save space if you have maps with |
953 | | // identical field populations. |
954 | | enum BuilderFlag { |
955 | | BUILDER_FLAG_NONE = 0, |
956 | | BUILDER_FLAG_SHARE_KEYS = 1, |
957 | | BUILDER_FLAG_SHARE_STRINGS = 2, |
958 | | BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3, |
959 | | BUILDER_FLAG_SHARE_KEY_VECTORS = 4, |
960 | | BUILDER_FLAG_SHARE_ALL = 7, |
961 | | }; |
962 | | |
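For example, to pool both keys and strings (a sketch; worthwhile when many values repeat, at the memory and lookup cost described above):

// Pool identical keys and identical string values while building.
flexbuffers::Builder slb(/*initial_size=*/512,
                         flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);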
963 | | class Builder FLATBUFFERS_FINAL_CLASS { |
964 | | public: |
965 | | Builder(size_t initial_size = 256, |
966 | | BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS) |
967 | | : buf_(initial_size), |
968 | | finished_(false), |
969 | | has_duplicate_keys_(false), |
970 | | flags_(flags), |
971 | | force_min_bit_width_(BIT_WIDTH_8), |
972 | | key_pool(KeyOffsetCompare(buf_)), |
973 | 0 | string_pool(StringOffsetCompare(buf_)) { |
974 | 0 | buf_.clear(); |
975 | 0 | } |
976 | | |
977 | | #ifdef FLATBUFFERS_DEFAULT_DECLARATION |
978 | | Builder(Builder &&) = default; |
979 | | Builder &operator=(Builder &&) = default; |
980 | | #endif |
981 | | |
982 | | /// @brief Get the serialized buffer (after you call `Finish()`). |
983 | | /// @return Returns a vector owned by this class. |
984 | 0 | const std::vector<uint8_t> &GetBuffer() const { |
985 | 0 | Finished(); |
986 | 0 | return buf_; |
987 | 0 | } |
988 | | |
989 | | // Size of the buffer. Does not include unfinished values. |
990 | 0 | size_t GetSize() const { return buf_.size(); } |
991 | | |
992 | | // Reset all state so we can re-use the buffer. |
993 | 0 | void Clear() { |
994 | 0 | buf_.clear(); |
995 | 0 | stack_.clear(); |
996 | 0 | finished_ = false; |
997 | 0 | // flags_ remains as-is. |
998 | 0 | force_min_bit_width_ = BIT_WIDTH_8; |
999 | 0 | key_pool.clear(); |
1000 | 0 | string_pool.clear(); |
1001 | 0 | } |
1002 | | |
1003 | | // All value constructing functions below have two versions: one that |
1004 | | // takes a key (for placement inside a map) and one that doesn't (for inside |
1005 | | // vectors and elsewhere). |
1006 | | |
1007 | 0 | void Null() { stack_.push_back(Value()); } |
1008 | 0 | void Null(const char *key) { |
1009 | 0 | Key(key); |
1010 | 0 | Null(); |
1011 | 0 | } |
1012 | | |
1013 | 0 | void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); } |
1014 | 0 | void Int(const char *key, int64_t i) { |
1015 | 0 | Key(key); |
1016 | 0 | Int(i); |
1017 | 0 | } |
1018 | | |
1019 | 0 | void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); } |
1020 | 0 | void UInt(const char *key, uint64_t u) { |
1021 | 0 | Key(key); |
1022 | 0 | UInt(u); |
1023 | 0 | } |
1024 | | |
1025 | 0 | void Float(float f) { stack_.push_back(Value(f)); } |
1026 | 0 | void Float(const char *key, float f) { |
1027 | 0 | Key(key); |
1028 | 0 | Float(f); |
1029 | 0 | } |
1030 | | |
1031 | 0 | void Double(double f) { stack_.push_back(Value(f)); } |
1032 | 0 | void Double(const char *key, double d) { |
1033 | 0 | Key(key); |
1034 | 0 | Double(d); |
1035 | 0 | } |
1036 | | |
1037 | 0 | void Bool(bool b) { stack_.push_back(Value(b)); } |
1038 | 0 | void Bool(const char *key, bool b) { |
1039 | 0 | Key(key); |
1040 | 0 | Bool(b); |
1041 | 0 | } |
1042 | | |
1043 | 0 | void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); } |
1044 | 0 | void IndirectInt(const char *key, int64_t i) { |
1045 | 0 | Key(key); |
1046 | 0 | IndirectInt(i); |
1047 | 0 | } |
1048 | | |
1049 | 0 | void IndirectUInt(uint64_t u) { |
1050 | 0 | PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); |
1051 | 0 | } |
1052 | 0 | void IndirectUInt(const char *key, uint64_t u) { |
1053 | 0 | Key(key); |
1054 | 0 | IndirectUInt(u); |
1055 | 0 | } |
1056 | | |
1057 | 0 | void IndirectFloat(float f) { |
1058 | 0 | PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); |
1059 | 0 | } |
1060 | 0 | void IndirectFloat(const char *key, float f) { |
1061 | 0 | Key(key); |
1062 | 0 | IndirectFloat(f); |
1063 | 0 | } |
1064 | | |
1065 | 0 | void IndirectDouble(double f) { |
1066 | 0 | PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); |
1067 | 0 | } |
1068 | 0 | void IndirectDouble(const char *key, double d) { |
1069 | 0 | Key(key); |
1070 | 0 | IndirectDouble(d); |
1071 | 0 | } |
1072 | | |
1073 | 0 | size_t Key(const char *str, size_t len) { |
1074 | 0 | auto sloc = buf_.size(); |
1075 | 0 | WriteBytes(str, len + 1); |
1076 | 0 | if (flags_ & BUILDER_FLAG_SHARE_KEYS) { |
1077 | 0 | auto it = key_pool.find(sloc); |
1078 | 0 | if (it != key_pool.end()) { |
1079 | 0 | // Already in the buffer. Remove key we just serialized, and use |
1080 | 0 | // existing offset instead. |
1081 | 0 | buf_.resize(sloc); |
1082 | 0 | sloc = *it; |
1083 | 0 | } else { |
1084 | 0 | key_pool.insert(sloc); |
1085 | 0 | } |
1086 | 0 | } |
1087 | 0 | stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8)); |
1088 | 0 | return sloc; |
1089 | 0 | } |
1090 | | |
1091 | 0 | size_t Key(const char *str) { return Key(str, strlen(str)); } |
1092 | 0 | size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); } |
1093 | | |
1094 | 0 | size_t String(const char *str, size_t len) { |
1095 | 0 | auto reset_to = buf_.size(); |
1096 | 0 | auto sloc = CreateBlob(str, len, 1, FBT_STRING); |
1097 | 0 | if (flags_ & BUILDER_FLAG_SHARE_STRINGS) { |
1098 | 0 | StringOffset so(sloc, len); |
1099 | 0 | auto it = string_pool.find(so); |
1100 | 0 | if (it != string_pool.end()) { |
1101 | 0 | // Already in the buffer. Remove string we just serialized, and use |
1102 | 0 | // existing offset instead. |
1103 | 0 | buf_.resize(reset_to); |
1104 | 0 | sloc = it->first; |
1105 | 0 | stack_.back().u_ = sloc; |
1106 | 0 | } else { |
1107 | 0 | string_pool.insert(so); |
1108 | 0 | } |
1109 | 0 | } |
1110 | 0 | return sloc; |
1111 | 0 | } |
1112 | 0 | size_t String(const char *str) { return String(str, strlen(str)); } |
1113 | 0 | size_t String(const std::string &str) { |
1114 | 0 | return String(str.c_str(), str.size()); |
1115 | 0 | } |
1116 | 0 | void String(const flexbuffers::String &str) { |
1117 | 0 | String(str.c_str(), str.length()); |
1118 | 0 | } |
1119 | | |
1120 | 0 | void String(const char *key, const char *str) { |
1121 | 0 | Key(key); |
1122 | 0 | String(str); |
1123 | 0 | } |
1124 | 0 | void String(const char *key, const std::string &str) { |
1125 | 0 | Key(key); |
1126 | 0 | String(str); |
1127 | 0 | } |
1128 | 0 | void String(const char *key, const flexbuffers::String &str) { |
1129 | 0 | Key(key); |
1130 | 0 | String(str); |
1131 | 0 | } |
1132 | | |
1133 | 0 | size_t Blob(const void *data, size_t len) { |
1134 | 0 | return CreateBlob(data, len, 0, FBT_BLOB); |
1135 | 0 | } |
1136 | 0 | size_t Blob(const std::vector<uint8_t> &v) { |
1137 | 0 | return CreateBlob(v.data(), v.size(), 0, FBT_BLOB); |
1138 | 0 | } |
1139 | | |
1140 | 0 | void Blob(const char *key, const void *data, size_t len) { |
1141 | 0 | Key(key); |
1142 | 0 | Blob(data, len); |
1143 | 0 | } |
1144 | 0 | void Blob(const char *key, const std::vector<uint8_t> &v) { |
1145 | 0 | Key(key); |
1146 | 0 | Blob(v); |
1147 | 0 | } |
1148 | | |
1149 | | // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String), |
1150 | | // e.g. Vector etc. Also in overloaded versions. |
1151 | | // Also some FlatBuffers types? |
1152 | | |
1153 | 0 | size_t StartVector() { return stack_.size(); } |
1154 | 0 | size_t StartVector(const char *key) { |
1155 | 0 | Key(key); |
1156 | 0 | return stack_.size(); |
1157 | 0 | } |
1158 | 0 | size_t StartMap() { return stack_.size(); } |
1159 | 0 | size_t StartMap(const char *key) { |
1160 | 0 | Key(key); |
1161 | 0 | return stack_.size(); |
1162 | 0 | } |
1163 | | |
1164 | | // TODO(wvo): allow this to specify an alignment greater than the natural |
1165 | | // alignment. |
1166 | 0 | size_t EndVector(size_t start, bool typed, bool fixed) { |
1167 | 0 | auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed); |
1168 | 0 | // Remove temp elements and return vector. |
1169 | 0 | stack_.resize(start); |
1170 | 0 | stack_.push_back(vec); |
1171 | 0 | return static_cast<size_t>(vec.u_); |
1172 | 0 | } |
1173 | | |
1174 | 0 | size_t EndMap(size_t start) { |
1175 | 0 | // We should have interleaved keys and values on the stack. |
1176 | 0 | auto len = MapElementCount(start); |
1177 | 0 | // Make sure keys are all strings: |
1178 | 0 | for (auto key = start; key < stack_.size(); key += 2) { |
1179 | 0 | FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY); |
1180 | 0 | } |
1181 | 0 | // Now sort values, so later we can do a binary search lookup. |
1182 | 0 | // We want to sort 2 array elements at a time. |
1183 | 0 | struct TwoValue { |
1184 | 0 | Value key; |
1185 | 0 | Value val; |
1186 | 0 | }; |
1187 | 0 | // TODO(wvo): strict aliasing? |
1188 | 0 | // TODO(wvo): allow the caller to indicate the data is already sorted |
1189 | 0 | // for maximum efficiency? With an assert to check sortedness to make sure |
1190 | 0 | // we're not breaking binary search. |
1191 | 0 | // Or, we can track if the map is sorted as keys are added, which would |
1192 | 0 | // be quite cheap (cheaper than checking it here), so we can skip this |
1193 | 0 | // step automatically when applicable, and encourage people to write in |
1194 | 0 | // sorted fashion. |
1195 | 0 | // std::sort is typically already a lot faster on sorted data though. |
1196 | 0 | auto dict = reinterpret_cast<TwoValue *>(stack_.data() + start); |
1197 | 0 | std::sort( |
1198 | 0 | dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool { |
1199 | 0 | auto as = reinterpret_cast<const char *>(buf_.data() + a.key.u_); |
1200 | 0 | auto bs = reinterpret_cast<const char *>(buf_.data() + b.key.u_); |
1201 | 0 | auto comp = strcmp(as, bs); |
1202 | 0 | // We want to disallow duplicate keys, since this results in a |
1203 | 0 | // map where values cannot be found. |
1204 | 0 | // But we can't assert here (since we don't want to fail on |
1205 | 0 | // random JSON input) or have an error mechanism. |
1206 | 0 | // Instead, we set has_duplicate_keys_ in the builder to |
1207 | 0 | // signal this. |
1208 | 0 | // TODO: Have to check for pointer equality, as some sort |
1209 | 0 | // implementations apparently call this function with the same |
1210 | 0 | // element. Why? |
1211 | 0 | if (!comp && &a != &b) has_duplicate_keys_ = true; |
1212 | 0 | return comp < 0; |
1213 | 0 | }); |
1214 | 0 | // First create a vector out of all keys. |
1215 | 0 | // TODO(wvo): if BUILDER_FLAG_SHARE_KEY_VECTORS is set, see if we can share |
1216 | 0 | // the first vector. |
1217 | 0 | auto keys = CreateVector(start, len, 2, true, false); |
1218 | 0 | auto vec = CreateVector(start + 1, len, 2, false, false, &keys); |
1219 | 0 | // Remove temp elements and return map. |
1220 | 0 | stack_.resize(start); |
1221 | 0 | stack_.push_back(vec); |
1222 | 0 | return static_cast<size_t>(vec.u_); |
1223 | 0 | } |
1224 | | |
1225 | | // Call this after EndMap to see if the map had any duplicate keys. |
1226 | | // Any map with such keys won't be able to retrieve all values. |
1227 | 0 | bool HasDuplicateKeys() const { return has_duplicate_keys_; } |
1228 | | |
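A sketch of that signal in use:

#include <cassert>
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder slb;
  auto start = slb.StartMap();
  slb.Int("a", 1);
  slb.Int("a", 2);  // same key twice
  slb.EndMap(start);
  assert(slb.HasDuplicateKeys());  // lookups of "a" are now unreliable
}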
1229 | | template<typename F> size_t Vector(F f) { |
1230 | | auto start = StartVector(); |
1231 | | f(); |
1232 | | return EndVector(start, false, false); |
1233 | | } |
1234 | | template<typename F, typename T> size_t Vector(F f, T &state) { |
1235 | | auto start = StartVector(); |
1236 | | f(state); |
1237 | | return EndVector(start, false, false); |
1238 | | } |
1239 | | template<typename F> size_t Vector(const char *key, F f) { |
1240 | | auto start = StartVector(key); |
1241 | | f(); |
1242 | | return EndVector(start, false, false); |
1243 | | } |
1244 | | template<typename F, typename T> |
1245 | | size_t Vector(const char *key, F f, T &state) { |
1246 | | auto start = StartVector(key); |
1247 | | f(state); |
1248 | | return EndVector(start, false, false); |
1249 | | } |
1250 | | |
1251 | | template<typename T> void Vector(const T *elems, size_t len) { |
1252 | | if (flatbuffers::is_scalar<T>::value) { |
1253 | | // This path should be a lot quicker and use less space. |
1254 | | ScalarVector(elems, len, false); |
1255 | | } else { |
1256 | | auto start = StartVector(); |
1257 | | for (size_t i = 0; i < len; i++) Add(elems[i]); |
1258 | | EndVector(start, false, false); |
1259 | | } |
1260 | | } |
1261 | | template<typename T> |
1262 | | void Vector(const char *key, const T *elems, size_t len) { |
1263 | | Key(key); |
1264 | | Vector(elems, len); |
1265 | | } |
1266 | | template<typename T> void Vector(const std::vector<T> &vec) { |
1267 | | Vector(vec.data(), vec.size()); |
1268 | | } |
1269 | | |
1270 | | template<typename F> size_t TypedVector(F f) { |
1271 | | auto start = StartVector(); |
1272 | | f(); |
1273 | | return EndVector(start, true, false); |
1274 | | } |
1275 | | template<typename F, typename T> size_t TypedVector(F f, T &state) { |
1276 | | auto start = StartVector(); |
1277 | | f(state); |
1278 | | return EndVector(start, true, false); |
1279 | | } |
1280 | | template<typename F> size_t TypedVector(const char *key, F f) { |
1281 | | auto start = StartVector(key); |
1282 | | f(); |
1283 | | return EndVector(start, true, false); |
1284 | | } |
1285 | | template<typename F, typename T> |
1286 | | size_t TypedVector(const char *key, F f, T &state) { |
1287 | | auto start = StartVector(key); |
1288 | | f(state); |
1289 | | return EndVector(start, true, false); |
1290 | | } |
1291 | | |
1292 | | template<typename T> size_t FixedTypedVector(const T *elems, size_t len) { |
1293 | |     // We only support a few fixed vector lengths. For anything bigger,
1294 | |     // use a regular typed vector.
1295 | | FLATBUFFERS_ASSERT(len >= 2 && len <= 4); |
1296 | | // And only scalar values. |
1297 | |     static_assert(flatbuffers::is_scalar<T>::value, "Fixed typed vectors only support scalar element types");
1298 | | return ScalarVector(elems, len, true); |
1299 | | } |
1300 | | |
1301 | | template<typename T> |
1302 | | size_t FixedTypedVector(const char *key, const T *elems, size_t len) { |
1303 | | Key(key); |
1304 | | return FixedTypedVector(elems, len); |
1305 | | } |
1306 | | |
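
A sketch of FixedTypedVector; per the asserts above, only lengths 2..4 of a single scalar type are accepted. The key "pos" and the builder name are illustrative:

    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      float pos[] = { 1.0f, 2.0f, 3.0f };
      fbb.FixedTypedVector("pos", pos, 3);  // Stored as FBT_VECTOR_FLOAT3; no size field.
    });
    fbb.Finish();
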
1307 | | template<typename F> size_t Map(F f) { |
1308 | | auto start = StartMap(); |
1309 | | f(); |
1310 | | return EndMap(start); |
1311 | | } |
1312 | | template<typename F, typename T> size_t Map(F f, T &state) { |
1313 | | auto start = StartMap(); |
1314 | | f(state); |
1315 | | return EndMap(start); |
1316 | | } |
1317 | | template<typename F> size_t Map(const char *key, F f) { |
1318 | | auto start = StartMap(key); |
1319 | | f(); |
1320 | | return EndMap(start); |
1321 | | } |
1322 | | template<typename F, typename T> size_t Map(const char *key, F f, T &state) { |
1323 | | auto start = StartMap(key); |
1324 | | f(state); |
1325 | | return EndMap(start); |
1326 | | } |
1327 | | template<typename T> void Map(const std::map<std::string, T> &map) { |
1328 | | auto start = StartMap(); |
1329 | | for (auto it = map.begin(); it != map.end(); ++it) |
1330 | | Add(it->first.c_str(), it->second); |
1331 | | EndMap(start); |
1332 | | } |
1333 | | |
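
A sketch of the Map overloads above; nested lambdas produce nested maps, and the keyed String() overload is defined earlier in this header (all names illustrative):

    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      fbb.String("name", "Fred");
      fbb.Map("address", [&]() { fbb.String("city", "Anytown"); });
    });
    fbb.Finish();

Or, building directly from a std::map:

    std::map<std::string, int> m = { { "a", 1 }, { "b", 2 } };
    flexbuffers::Builder fbb2;
    fbb2.Map(m);
    fbb2.Finish();
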
1334 | 0 | size_t MapElementCount(size_t start) { |
1335 | 0 | // Make sure it is an even number: |
1336 | 0 | auto len = stack_.size() - start; |
1337 | 0 | FLATBUFFERS_ASSERT(!(len & 1)); |
1338 | 0 | len /= 2; |
1339 | 0 | return len; |
1340 | 0 | } |
1341 | | |
1342 | |   // These functions let you share a value explicitly: either a value not
1343 | |   // shared automatically through one of the BUILDER_FLAG_SHARE_* flags, or
1344 | |   // a value shared even though those flags are turned off for performance
1345 | |   // reasons. For example:
1346 | | // builder.IndirectDouble(M_PI); |
1347 | | // auto id = builder.LastValue(); // Remember where we stored it. |
1348 | | // .. more code goes here .. |
1349 | | // builder.ReuseValue(id); // Refers to same double by offset. |
1350 | |   // LastValue works regardless of whether the value has a key or not,
1351 | |   // and works on any data type.
1352 | | struct Value; |
1353 | 0 | Value LastValue() { return stack_.back(); } |
1354 | 0 | void ReuseValue(Value v) { stack_.push_back(v); } |
1355 | 0 | void ReuseValue(const char *key, Value v) { |
1356 | 0 | Key(key); |
1357 | 0 | ReuseValue(v); |
1358 | 0 | } |
1359 | | |
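
A complete version of the explicit-sharing example from the comment above (names illustrative):

    flexbuffers::Builder fbb;
    fbb.Vector([&]() {
      fbb.IndirectDouble(3.14159);
      auto id = fbb.LastValue();  // Offset of the double just written.
      fbb.ReuseValue(id);         // Second element refers to the same bytes.
    });
    fbb.Finish();
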
1360 | | // Undo the last element serialized. Call once for a value and once for a |
1361 | | // key. |
1362 | 0 | void Undo() { |
1363 | 0 | stack_.pop_back(); |
1364 | 0 | } |
1365 | | |
1366 | | // Overloaded Add that tries to call the correct function above. |
1367 | 0 | void Add(int8_t i) { Int(i); } |
1368 | 0 | void Add(int16_t i) { Int(i); } |
1369 | 0 | void Add(int32_t i) { Int(i); } |
1370 | 0 | void Add(int64_t i) { Int(i); } |
1371 | 0 | void Add(uint8_t u) { UInt(u); } |
1372 | 0 | void Add(uint16_t u) { UInt(u); } |
1373 | 0 | void Add(uint32_t u) { UInt(u); } |
1374 | 0 | void Add(uint64_t u) { UInt(u); } |
1375 | 0 | void Add(float f) { Float(f); } |
1376 | 0 | void Add(double d) { Double(d); } |
1377 | 0 | void Add(bool b) { Bool(b); } |
1378 | 0 | void Add(const char *str) { String(str); } |
1379 | 0 | void Add(const std::string &str) { String(str); } |
1380 | 0 | void Add(const flexbuffers::String &str) { String(str); } |
1381 | | |
1382 | | template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); } |
1383 | | |
1384 | | template<typename T> void Add(const char *key, const T &t) { |
1385 | | Key(key); |
1386 | | Add(t); |
1387 | | } |
1388 | | |
1389 | | template<typename T> void Add(const std::map<std::string, T> &map) { |
1390 | | Map(map); |
1391 | | } |
1392 | | |
1393 | | template<typename T> void operator+=(const T &t) { Add(t); } |
1394 | | |
1395 | | // This function is useful in combination with the Mutate* functions above. |
1396 | | // It forces elements of vectors and maps to have a minimum size, such that |
1397 | | // they can later be updated without failing. |
1398 | | // Call with no arguments to reset. |
1399 | 0 | void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { |
1400 | 0 | force_min_bit_width_ = bw; |
1401 | 0 | } |
1402 | | |
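
A sketch of pre-sizing for later in-place mutation via the Reference Mutate* functions mentioned above (GetRoot, AsVector and MutateInt are the reader-side helpers defined earlier in this header; names illustrative). Without ForceMinimumBitWidth the value below might be stored in fewer than 8 bytes and MutateInt would return false:

    flexbuffers::Builder fbb;
    fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);
    fbb.Vector([&]() { fbb.Int(1); });
    fbb.Finish();
    auto buf = fbb.GetBuffer();  // Take a copy we are allowed to mutate.
    bool ok = flexbuffers::GetRoot(buf).AsVector()[0].MutateInt(1LL << 40);
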
1403 | 0 | void Finish() { |
1404 | 0 | // If you hit this assert, you likely have objects that were never included |
1405 | 0 | // in a parent. You need to have exactly one root to finish a buffer. |
1406 | 0 | // Check your Start/End calls are matched, and all objects are inside |
1407 | 0 | // some other object. |
1408 | 0 | FLATBUFFERS_ASSERT(stack_.size() == 1); |
1409 | 0 |
1410 | 0 | // Write root value. |
1411 | 0 | auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0)); |
1412 | 0 | WriteAny(stack_[0], byte_width); |
1413 | 0 | // Write root type. |
1414 | 0 | Write(stack_[0].StoredPackedType(), 1); |
1415 | 0 | // Write root size. Normally determined by parent, but root has no parent :) |
1416 | 0 | Write(byte_width, 1); |
1417 | 0 |
1418 | 0 | finished_ = true; |
1419 | 0 | } |
1420 | | |
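
A minimal end-to-end sketch: build exactly one root, finish, then read it back with the GetRoot/AsMap accessors defined earlier in this header (names illustrative):

    flexbuffers::Builder fbb;
    fbb.Map([&]() { fbb.Int("answer", 42); });
    fbb.Finish();  // Asserts that exactly one root is on the stack.
    auto root = flexbuffers::GetRoot(fbb.GetBuffer());
    int64_t answer = root.AsMap()["answer"].AsInt64();
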
1421 | | private: |
1422 | 0 | void Finished() const { |
1423 | 0 |     // If you get this assert, you're attempting to access a buffer
1424 | 0 | // which hasn't been finished yet. Be sure to call |
1425 | 0 | // Builder::Finish with your root object. |
1426 | 0 | FLATBUFFERS_ASSERT(finished_); |
1427 | 0 | } |
1428 | | |
1429 | | // Align to prepare for writing a scalar with a certain size. |
1430 | 0 | uint8_t Align(BitWidth alignment) { |
1431 | 0 | auto byte_width = 1U << alignment; |
1432 | 0 | buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), |
1433 | 0 | 0); |
1434 | 0 | return static_cast<uint8_t>(byte_width); |
1435 | 0 | } |
1436 | | |
1437 | 0 | void WriteBytes(const void *val, size_t size) { |
1438 | 0 | buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val), |
1439 | 0 | reinterpret_cast<const uint8_t *>(val) + size); |
1440 | 0 | } |
1441 | | |
1442 | 0 | template<typename T> void Write(T val, size_t byte_width) { |
1443 | 0 | FLATBUFFERS_ASSERT(sizeof(T) >= byte_width); |
1444 | 0 | val = flatbuffers::EndianScalar(val); |
1445 | 0 | WriteBytes(&val, byte_width); |
1446 | 0 |   }
1447 | | |
1448 | 0 | void WriteDouble(double f, uint8_t byte_width) { |
1449 | 0 | switch (byte_width) { |
1450 | 0 | case 8: Write(f, byte_width); break; |
1451 | 0 | case 4: Write(static_cast<float>(f), byte_width); break; |
1452 | 0 | // case 2: Write(static_cast<half>(f), byte_width); break; |
1453 | 0 | // case 1: Write(static_cast<quarter>(f), byte_width); break; |
1454 | 0 | default: FLATBUFFERS_ASSERT(0); |
1455 | 0 | } |
1456 | 0 | } |
1457 | | |
1458 | 0 | void WriteOffset(uint64_t o, uint8_t byte_width) { |
1459 | 0 | auto reloff = buf_.size() - o; |
1460 | 0 | FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8)); |
1461 | 0 | Write(reloff, byte_width); |
1462 | 0 | } |
1463 | | |
1464 | 0 | template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) { |
1465 | 0 | auto byte_width = Align(bit_width); |
1466 | 0 | auto iloc = buf_.size(); |
1467 | 0 | Write(val, byte_width); |
1468 | 0 | stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width)); |
1469 | 0 |   }
1470 | | |
1471 | 0 | static BitWidth WidthB(size_t byte_width) { |
1472 | 0 | switch (byte_width) { |
1473 | 0 | case 1: return BIT_WIDTH_8; |
1474 | 0 | case 2: return BIT_WIDTH_16; |
1475 | 0 | case 4: return BIT_WIDTH_32; |
1476 | 0 | case 8: return BIT_WIDTH_64; |
1477 | 0 | default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64; |
1478 | 0 | } |
1479 | 0 | } |
1480 | | |
1481 | | template<typename T> static Type GetScalarType() { |
1482 | |     static_assert(flatbuffers::is_scalar<T>::value, "GetScalarType requires a scalar type");
1483 | | return flatbuffers::is_floating_point<T>::value ? FBT_FLOAT |
1484 | | : flatbuffers::is_same<T, bool>::value |
1485 | | ? FBT_BOOL |
1486 | | : (flatbuffers::is_unsigned<T>::value ? FBT_UINT : FBT_INT); |
1487 | | } |
1488 | | |
1489 | | public: |
1490 | | // This was really intended to be private, except for LastValue/ReuseValue. |
1491 | | struct Value { |
1492 | | union { |
1493 | | int64_t i_; |
1494 | | uint64_t u_; |
1495 | | double f_; |
1496 | | }; |
1497 | | |
1498 | | Type type_; |
1499 | | |
1500 | |     // For scalars: width of the value itself; for vectors: of the elements; for strings: of the length field.
1501 | | BitWidth min_bit_width_; |
1502 | | |
1503 | 0 | Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {} |
1504 | | |
1505 | | Value(bool b) |
1506 | | : u_(static_cast<uint64_t>(b)), |
1507 | | type_(FBT_BOOL), |
1508 | 0 | min_bit_width_(BIT_WIDTH_8) {} |
1509 | | |
1510 | | Value(int64_t i, Type t, BitWidth bw) |
1511 | 0 | : i_(i), type_(t), min_bit_width_(bw) {} |
1512 | | Value(uint64_t u, Type t, BitWidth bw) |
1513 | 0 | : u_(u), type_(t), min_bit_width_(bw) {} |
1514 | | |
1515 | | Value(float f) |
1516 | | : f_(static_cast<double>(f)), |
1517 | | type_(FBT_FLOAT), |
1518 | 0 | min_bit_width_(BIT_WIDTH_32) {} |
1519 | 0 | Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {} |
1520 | | |
1521 | 0 | uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { |
1522 | 0 | return PackedType(StoredWidth(parent_bit_width_), type_); |
1523 | 0 | } |
1524 | | |
1525 | 0 | BitWidth ElemWidth(size_t buf_size, size_t elem_index) const { |
1526 | 0 | if (IsInline(type_)) { |
1527 | 0 | return min_bit_width_; |
1528 | 0 | } else { |
1529 | 0 | // We have an absolute offset, but want to store a relative offset |
1530 | 0 | // elem_index elements beyond the current buffer end. Since whether |
1531 | 0 | // the relative offset fits in a certain byte_width depends on |
1532 | 0 | // the size of the elements before it (and their alignment), we have |
1533 | 0 | // to test for each size in turn. |
1534 | 0 | for (size_t byte_width = 1; |
1535 | 0 | byte_width <= sizeof(flatbuffers::largest_scalar_t); |
1536 | 0 | byte_width *= 2) { |
1537 | 0 | // Where are we going to write this offset? |
1538 | 0 | auto offset_loc = buf_size + |
1539 | 0 | flatbuffers::PaddingBytes(buf_size, byte_width) + |
1540 | 0 | elem_index * byte_width; |
1541 | 0 | // Compute relative offset. |
1542 | 0 | auto offset = offset_loc - u_; |
1543 | 0 | // Does it fit? |
1544 | 0 | auto bit_width = WidthU(offset); |
1545 | 0 | if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) == |
1546 | 0 | byte_width) |
1547 | 0 | return bit_width; |
1548 | 0 | } |
1549 | 0 | FLATBUFFERS_ASSERT(false); // Must match one of the sizes above. |
1550 | 0 | return BIT_WIDTH_64; |
1551 | 0 | } |
1552 | 0 | } |
1553 | | |
1554 | 0 | BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { |
1555 | 0 | if (IsInline(type_)) { |
1556 | 0 | return (std::max)(min_bit_width_, parent_bit_width_); |
1557 | 0 | } else { |
1558 | 0 | return min_bit_width_; |
1559 | 0 | } |
1560 | 0 | } |
1561 | | }; |
1562 | | |
1563 | | private: |
1564 | 0 | void WriteAny(const Value &val, uint8_t byte_width) { |
1565 | 0 | switch (val.type_) { |
1566 | 0 | case FBT_NULL: |
1567 | 0 | case FBT_INT: Write(val.i_, byte_width); break; |
1568 | 0 | case FBT_BOOL: |
1569 | 0 | case FBT_UINT: Write(val.u_, byte_width); break; |
1570 | 0 | case FBT_FLOAT: WriteDouble(val.f_, byte_width); break; |
1571 | 0 | default: WriteOffset(val.u_, byte_width); break; |
1572 | 0 | } |
1573 | 0 | } |
1574 | | |
1575 | 0 | size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) { |
1576 | 0 | auto bit_width = WidthU(len); |
1577 | 0 | auto byte_width = Align(bit_width); |
1578 | 0 | Write<uint64_t>(len, byte_width); |
1579 | 0 | auto sloc = buf_.size(); |
1580 | 0 | WriteBytes(data, len + trailing); |
1581 | 0 | stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width)); |
1582 | 0 | return sloc; |
1583 | 0 | } |
1584 | | |
1585 | | template<typename T> |
1586 | | size_t ScalarVector(const T *elems, size_t len, bool fixed) { |
1587 | | auto vector_type = GetScalarType<T>(); |
1588 | | auto byte_width = sizeof(T); |
1589 | | auto bit_width = WidthB(byte_width); |
1590 | |     // If you get this assert, you're trying to write a vector whose length
1591 | |     // doesn't fit in the bit width of its scalar elements (e.g. a byte
1592 | |     // vector of more than 255 elements). For such data, write a "blob" instead.
1593 | | // TODO: instead of asserting, could write vector with larger elements |
1594 | | // instead, though that would be wasteful. |
1595 | | FLATBUFFERS_ASSERT(WidthU(len) <= bit_width); |
1596 | | Align(bit_width); |
1597 | | if (!fixed) Write<uint64_t>(len, byte_width); |
1598 | | auto vloc = buf_.size(); |
1599 | | for (size_t i = 0; i < len; i++) Write(elems[i], byte_width); |
1600 | | stack_.push_back(Value(static_cast<uint64_t>(vloc), |
1601 | | ToTypedVector(vector_type, fixed ? len : 0), |
1602 | | bit_width)); |
1603 | | return vloc; |
1604 | | } |
1605 | | |
1606 | | Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, |
1607 | 0 | bool fixed, const Value *keys = nullptr) { |
1608 | 0 | FLATBUFFERS_ASSERT( |
1609 | 0 | !fixed || |
1610 | 0 | typed); // typed=false, fixed=true combination is not supported. |
1611 | 0 | // Figure out smallest bit width we can store this vector with. |
1612 | 0 | auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len)); |
1613 | 0 | auto prefix_elems = 1; |
1614 | 0 | if (keys) { |
1615 | 0 |       // If this vector is part of a map, we will prefix it with an offset
1616 | 0 |       // to (and the byte width of) the keys vector.
1617 | 0 | bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0)); |
1618 | 0 | prefix_elems += 2; |
1619 | 0 | } |
1620 | 0 | Type vector_type = FBT_KEY; |
1621 | 0 | // Check bit widths and types for all elements. |
1622 | 0 | for (size_t i = start; i < stack_.size(); i += step) { |
1623 | 0 | auto elem_width = |
1624 | 0 | stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems); |
1625 | 0 | bit_width = (std::max)(bit_width, elem_width); |
1626 | 0 | if (typed) { |
1627 | 0 | if (i == start) { |
1628 | 0 | vector_type = stack_[i].type_; |
1629 | 0 | } else { |
1630 | 0 | // If you get this assert, you are writing a typed vector with |
1631 | 0 | // elements that are not all the same type. |
1632 | 0 | FLATBUFFERS_ASSERT(vector_type == stack_[i].type_); |
1633 | 0 | } |
1634 | 0 | } |
1635 | 0 | } |
1636 | 0 | // If you get this assert, your typed types are not one of: |
1637 | 0 | // Int / UInt / Float / Key. |
1638 | 0 | FLATBUFFERS_ASSERT(!typed || IsTypedVectorElementType(vector_type)); |
1639 | 0 | auto byte_width = Align(bit_width); |
1640 | 0 |     // Write vector. First the keys offset and width if available, then the size.
1641 | 0 | if (keys) { |
1642 | 0 | WriteOffset(keys->u_, byte_width); |
1643 | 0 | Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width); |
1644 | 0 | } |
1645 | 0 | if (!fixed) Write<uint64_t>(vec_len, byte_width); |
1646 | 0 | // Then the actual data. |
1647 | 0 | auto vloc = buf_.size(); |
1648 | 0 | for (size_t i = start; i < stack_.size(); i += step) { |
1649 | 0 | WriteAny(stack_[i], byte_width); |
1650 | 0 | } |
1651 | 0 | // Then the types. |
1652 | 0 | if (!typed) { |
1653 | 0 | for (size_t i = start; i < stack_.size(); i += step) { |
1654 | 0 | buf_.push_back(stack_[i].StoredPackedType(bit_width)); |
1655 | 0 | } |
1656 | 0 | } |
1657 | 0 | return Value(static_cast<uint64_t>(vloc), |
1658 | 0 | keys ? FBT_MAP |
1659 | 0 | : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) |
1660 | 0 | : FBT_VECTOR), |
1661 | 0 | bit_width); |
1662 | 0 | } |
1663 | | |
1664 | | // You shouldn't really be copying instances of this class. |
1665 | | Builder(const Builder &); |
1666 | | Builder &operator=(const Builder &); |
1667 | | |
1668 | | std::vector<uint8_t> buf_; |
1669 | | std::vector<Value> stack_; |
1670 | | |
1671 | | bool finished_; |
1672 | | bool has_duplicate_keys_; |
1673 | | |
1674 | | BuilderFlag flags_; |
1675 | | |
1676 | | BitWidth force_min_bit_width_; |
1677 | | |
1678 | | struct KeyOffsetCompare { |
1679 | 0 | explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {} |
1680 | 0 | bool operator()(size_t a, size_t b) const { |
1681 | 0 | auto stra = reinterpret_cast<const char *>(buf_->data() + a); |
1682 | 0 | auto strb = reinterpret_cast<const char *>(buf_->data() + b); |
1683 | 0 | return strcmp(stra, strb) < 0; |
1684 | 0 | } |
1685 | | const std::vector<uint8_t> *buf_; |
1686 | | }; |
1687 | | |
1688 | | typedef std::pair<size_t, size_t> StringOffset; |
1689 | | struct StringOffsetCompare { |
1690 | | explicit StringOffsetCompare(const std::vector<uint8_t> &buf) |
1691 | 0 | : buf_(&buf) {} |
1692 | 0 | bool operator()(const StringOffset &a, const StringOffset &b) const { |
1693 | 0 | auto stra = buf_->data() + a.first; |
1694 | 0 | auto strb = buf_->data() + b.first; |
1695 | 0 | auto cr = memcmp(stra, strb, (std::min)(a.second, b.second) + 1); |
1696 | 0 | return cr < 0 || (cr == 0 && a.second < b.second); |
1697 | 0 | } |
1698 | | const std::vector<uint8_t> *buf_; |
1699 | | }; |
1700 | | |
1701 | | typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap; |
1702 | | typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap; |
1703 | | |
1704 | | KeyOffsetMap key_pool; |
1705 | | StringOffsetMap string_pool; |
1706 | | |
1707 | | friend class Verifier; |
1708 | | }; |
1709 | | |
1710 | | // Helper class to verify the integrity of a FlexBuffer |
1711 | | class Verifier FLATBUFFERS_FINAL_CLASS { |
1712 | | public: |
1713 | | Verifier(const uint8_t *buf, size_t buf_len, |
1714 | | // Supplying this vector likely results in faster verification |
1715 | | // of larger buffers with many shared keys/strings, but |
1716 | |            // comes at the cost of using additional memory of the same size
1717 | |            // as the buffer being verified, so it is off by default.
1718 | | std::vector<uint8_t> *reuse_tracker = nullptr, |
1719 | | bool _check_alignment = true, size_t max_depth = 64) |
1720 | 2.47k | : buf_(buf), |
1721 | 2.47k | size_(buf_len), |
1722 | 2.47k | depth_(0), |
1723 | 2.47k | max_depth_(max_depth), |
1724 | 2.47k | num_vectors_(0), |
1725 | 2.47k | max_vectors_(buf_len), |
1726 | 2.47k | check_alignment_(_check_alignment), |
1727 | 2.47k | reuse_tracker_(reuse_tracker) { |
1728 | 2.47k | FLATBUFFERS_ASSERT(static_cast<int32_t>(size_) < FLATBUFFERS_MAX_BUFFER_SIZE); |
1729 | 2.47k | if (reuse_tracker_) { |
1730 | 0 | reuse_tracker_->clear(); |
1731 | 0 | reuse_tracker_->resize(size_, PackedType(BIT_WIDTH_8, FBT_NULL)); |
1732 | 0 | } |
1733 | 2.47k | } |
1734 | | |
1735 | | private: |
1736 | | // Central location where any verification failures register. |
1737 | 10.3M | bool Check(bool ok) const { |
1738 | | // clang-format off |
1739 | | #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE |
1740 | | FLATBUFFERS_ASSERT(ok); |
1741 | | #endif |
1742 | | // clang-format on |
1743 | 10.3M | return ok; |
1744 | 10.3M | } |
1745 | | |
1746 | | // Verify any range within the buffer. |
1747 | 1.82M | bool VerifyFrom(size_t elem, size_t elem_len) const { |
1748 | 1.82M | return Check(elem_len < size_ && elem <= size_ - elem_len); |
1749 | 1.82M | } |
1750 | 913k | bool VerifyBefore(size_t elem, size_t elem_len) const { |
1751 | 913k | return Check(elem_len <= elem); |
1752 | 913k | } |
1753 | | |
1754 | 1.82M | bool VerifyFromPointer(const uint8_t *p, size_t len) { |
1755 | 1.82M | auto o = static_cast<size_t>(p - buf_); |
1756 | 1.82M | return VerifyFrom(o, len); |
1757 | 1.82M | } |
1758 | 913k | bool VerifyBeforePointer(const uint8_t *p, size_t len) { |
1759 | 913k | auto o = static_cast<size_t>(p - buf_); |
1760 | 913k | return VerifyBefore(o, len); |
1761 | 913k | } |
1762 | | |
1763 | 1.83M | bool VerifyByteWidth(size_t width) { |
1764 | 1.83M | return Check(width == 1 || width == 2 || width == 4 || width == 8); |
1765 | 1.83M | } |
1766 | | |
1767 | 1.83M | bool VerifyType(int type) { return Check(type >= 0 && type < FBT_MAX_TYPE); } |
1768 | | |
1769 | 1.03M | bool VerifyOffset(uint64_t off, const uint8_t *p) { |
1770 | 1.03M | return Check(off <= static_cast<uint64_t>(size_)) && |
1771 | 1.03M | off <= static_cast<uint64_t>(p - buf_); |
1772 | 1.03M | } |
1773 | | |
1774 | 1.03M | bool VerifyAlignment(const uint8_t *p, size_t size) const { |
1775 | 1.03M | auto o = static_cast<size_t>(p - buf_); |
1776 | 1.03M | return Check((o & (size - 1)) == 0 || !check_alignment_); |
1777 | 1.03M | } |
1778 | | |
1779 | |   // Macro, since we want to return from the parent function and use lazily evaluated arguments.
1780 | | #define FLEX_CHECK_VERIFIED(P, PACKED_TYPE) \ |
1781 | 1.01M | if (reuse_tracker_) { \ |
1782 | 0 | auto packed_type = PACKED_TYPE; \ |
1783 | 0 | auto existing = (*reuse_tracker_)[P - buf_]; \ |
1784 | 0 | if (existing == packed_type) return true; \ |
1785 | 0 | /* Fail verification if already set with different type! */ \ |
1786 | 0 | if (!Check(existing == 0)) return false; \ |
1787 | 0 | (*reuse_tracker_)[P - buf_] = packed_type; \ |
1788 | 0 | } |
1789 | | |
1790 | 911k | bool VerifyVector(Reference r, const uint8_t *p, Type elem_type) { |
1791 | |     // Any kind of nesting goes through this function, so guard against it
1792 | |     // here, both with simple depth checks and, if enabled, the reuse tracker.
1793 | 911k | depth_++; |
1794 | 911k | num_vectors_++; |
1795 | 911k | if (!Check(depth_ <= max_depth_ && num_vectors_ <= max_vectors_)) |
1796 | 181 | return false; |
1797 | 911k | auto size_byte_width = r.byte_width_; |
1798 | 911k | if (!VerifyBeforePointer(p, size_byte_width)) return false; |
1799 | 911k | FLEX_CHECK_VERIFIED(p - size_byte_width, |
1800 | 911k | PackedType(Builder::WidthB(size_byte_width), r.type_)); |
1801 | 911k | auto sized = Sized(p, size_byte_width); |
1802 | 911k | auto num_elems = sized.size(); |
1803 | 911k | auto elem_byte_width = r.type_ == FBT_STRING || r.type_ == FBT_BLOB |
1804 | 911k | ? uint8_t(1) |
1805 | 911k | : r.byte_width_; |
1806 | 911k | auto max_elems = SIZE_MAX / elem_byte_width; |
1807 | 911k | if (!Check(num_elems < max_elems)) |
1808 | 12 | return false; // Protect against byte_size overflowing. |
1809 | 911k | auto byte_size = num_elems * elem_byte_width; |
1810 | 911k | if (!VerifyFromPointer(p, byte_size)) return false; |
1811 | 911k | if (elem_type == FBT_NULL) { |
1812 | | // Verify type bytes after the vector. |
1813 | 895k | if (!VerifyFromPointer(p + byte_size, num_elems)) return false; |
1814 | 895k | auto v = Vector(p, size_byte_width); |
1815 | 2.60M | for (size_t i = 0; i < num_elems; i++) |
1816 | 1.71M | if (!VerifyRef(v[i])) return false; |
1817 | 895k | } else if (elem_type == FBT_KEY) { |
1818 | 5.10k | auto v = TypedVector(p, elem_byte_width, FBT_KEY); |
1819 | 111k | for (size_t i = 0; i < num_elems; i++) |
1820 | 106k | if (!VerifyRef(v[i])) return false; |
1821 | 10.6k | } else { |
1822 | 10.6k | FLATBUFFERS_ASSERT(IsInline(elem_type)); |
1823 | 10.6k | } |
1824 | 906k | depth_--; |
1825 | 906k | return true; |
1826 | 911k | } |
1827 | | |
1828 | 1.79k | bool VerifyKeys(const uint8_t *p, uint8_t byte_width) { |
1829 | | // The vector part of the map has already been verified. |
1830 | 1.79k | const size_t num_prefixed_fields = 3; |
1831 | 1.79k | if (!VerifyBeforePointer(p, byte_width * num_prefixed_fields)) return false; |
1832 | 1.78k | p -= byte_width * num_prefixed_fields; |
1833 | 1.78k | auto off = ReadUInt64(p, byte_width); |
1834 | 1.78k | if (!VerifyOffset(off, p)) return false; |
1835 | 1.68k |     auto key_byte_width =
1836 | 1.68k |         static_cast<uint8_t>(ReadUInt64(p + byte_width, byte_width));
1837 | 1.68k |     if (!VerifyByteWidth(key_byte_width)) return false;
1838 | 1.57k |     return VerifyVector(Reference(p, byte_width, key_byte_width, FBT_VECTOR_KEY),
1839 | 1.57k | p - off, FBT_KEY); |
1840 | 1.68k | } |
1841 | | |
1842 | 108k | bool VerifyKey(const uint8_t *p) { |
1843 | 108k | FLEX_CHECK_VERIFIED(p, PackedType(BIT_WIDTH_8, FBT_KEY)); |
1844 | 28.5M | while (p < buf_ + size_) |
1845 | 28.5M | if (*p++) return true; |
1846 | 0 | return false; |
1847 | 108k | } |
1848 | | |
1849 | | #undef FLEX_CHECK_VERIFIED |
1850 | | |
1851 | 3.56k | bool VerifyTerminator(const String &s) { |
1852 | 3.56k | return VerifyFromPointer(reinterpret_cast<const uint8_t *>(s.c_str()), |
1853 | 3.56k | s.size() + 1); |
1854 | 3.56k | } |
1855 | | |
1856 | 1.82M | bool VerifyRef(Reference r) { |
1857 | | // r.parent_width_ and r.data_ already verified. |
1858 | 1.82M | if (!VerifyByteWidth(r.byte_width_) || !VerifyType(r.type_)) { |
1859 | 24 | return false; |
1860 | 24 | } |
1861 | 1.82M | if (IsInline(r.type_)) { |
1862 | | // Inline scalars, don't require further verification. |
1863 | 792k | return true; |
1864 | 792k | } |
1865 | | // All remaining types are an offset. |
1866 | 1.03M | auto off = ReadUInt64(r.data_, r.parent_width_); |
1867 | 1.03M | if (!VerifyOffset(off, r.data_)) return false; |
1868 | 1.03M | auto p = r.Indirect(); |
1869 | 1.03M | if (!VerifyAlignment(p, r.byte_width_)) return false; |
1870 | 1.03M | switch (r.type_) { |
1871 | 1.04k | case FBT_INDIRECT_INT: |
1872 | 2.47k | case FBT_INDIRECT_UINT: |
1873 | 5.83k | case FBT_INDIRECT_FLOAT: return VerifyFromPointer(p, r.byte_width_); |
1874 | 108k | case FBT_KEY: return VerifyKey(p); |
1875 | 5.60k | case FBT_MAP: |
1876 | 5.60k | return VerifyVector(r, p, FBT_NULL) && VerifyKeys(p, r.byte_width_); |
1877 | 890k | case FBT_VECTOR: return VerifyVector(r, p, FBT_NULL); |
1878 | 2.06k | case FBT_VECTOR_INT: return VerifyVector(r, p, FBT_INT); |
1879 | 310 | case FBT_VECTOR_BOOL: |
1880 | 3.13k | case FBT_VECTOR_UINT: return VerifyVector(r, p, FBT_UINT); |
1881 | 829 | case FBT_VECTOR_FLOAT: return VerifyVector(r, p, FBT_FLOAT); |
1882 | 2.29k | case FBT_VECTOR_KEY: return VerifyVector(r, p, FBT_KEY); |
1883 | 1.28k | case FBT_VECTOR_STRING_DEPRECATED: |
1884 | |       // Use of FBT_KEY here is intentional: deprecated string vectors are verified as key vectors.
1885 | 1.28k | return VerifyVector(r, p, FBT_KEY); |
1886 | 1.25k | case FBT_BLOB: return VerifyVector(r, p, FBT_UINT); |
1887 | 3.61k | case FBT_STRING: |
1888 | 3.61k | return VerifyVector(r, p, FBT_UINT) && |
1889 | 3.61k | VerifyTerminator(String(p, r.byte_width_)); |
1890 | 1.81k | case FBT_VECTOR_INT2: |
1891 | 2.64k | case FBT_VECTOR_UINT2: |
1892 | 3.99k | case FBT_VECTOR_FLOAT2: |
1893 | 5.13k | case FBT_VECTOR_INT3: |
1894 | 5.96k | case FBT_VECTOR_UINT3: |
1895 | 7.03k | case FBT_VECTOR_FLOAT3: |
1896 | 7.96k | case FBT_VECTOR_INT4: |
1897 | 9.17k | case FBT_VECTOR_UINT4: |
1898 | 10.2k | case FBT_VECTOR_FLOAT4: { |
1899 | 10.2k | uint8_t len = 0; |
1900 | 10.2k | auto vtype = ToFixedTypedVectorElementType(r.type_, &len); |
1901 | 10.2k | if (!VerifyType(vtype)) return false; |
1902 | 10.2k | return VerifyFromPointer(p, static_cast<size_t>(r.byte_width_) * len); |
1903 | 10.2k | } |
1904 | 9 | default: return false; |
1905 | 1.03M | } |
1906 | 1.03M | } |
1907 | | |
1908 | | public: |
1909 | 2.47k | bool VerifyBuffer() { |
1910 | 2.47k | if (!Check(size_ >= 3)) return false; |
1911 | 2.47k | auto end = buf_ + size_; |
1912 | 2.47k | auto byte_width = *--end; |
1913 | 2.47k | auto packed_type = *--end; |
1914 | 2.47k | return VerifyByteWidth(byte_width) && Check(end - buf_ >= byte_width) && |
1915 | 2.47k | VerifyRef(Reference(end - byte_width, byte_width, packed_type)); |
1916 | 2.47k | } |
1917 | | |
1918 | | private: |
1919 | | const uint8_t *buf_; |
1920 | | size_t size_; |
1921 | | size_t depth_; |
1922 | | const size_t max_depth_; |
1923 | | size_t num_vectors_; |
1924 | | const size_t max_vectors_; |
1925 | | bool check_alignment_; |
1926 | | std::vector<uint8_t> *reuse_tracker_; |
1927 | | }; |
1928 | | |
1929 | | // Utility function that constructs the Verifier for you, see above for |
1930 | | // parameters. |
1931 | | inline bool VerifyBuffer(const uint8_t *buf, size_t buf_len, |
1932 | 2.47k | std::vector<uint8_t> *reuse_tracker = nullptr) { |
1933 | 2.47k | Verifier verifier(buf, buf_len, reuse_tracker); |
1934 | 2.47k | return verifier.VerifyBuffer(); |
1935 | 2.47k | } |
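
A sketch of verifying an untrusted buffer before reading it; the function name ReadUntrusted is illustrative. Per the constructor comment above, the optional reuse_tracker trades one byte of memory per buffer byte for faster verification of buffers with many shared keys/strings:

    bool ReadUntrusted(const uint8_t *data, size_t len) {
      std::vector<uint8_t> reuse_tracker;
      if (!flexbuffers::VerifyBuffer(data, len, &reuse_tracker)) return false;
      auto root = flexbuffers::GetRoot(data, len);
      // ... the buffer is now safe to traverse via root ...
      return true;
    }
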
1936 | | |
1937 | | } // namespace flexbuffers |
1938 | | |
1939 | | #if defined(_MSC_VER) |
1940 | | # pragma warning(pop) |
1941 | | #endif |
1942 | | |
1943 | | #endif // FLATBUFFERS_FLEXBUFFERS_H_ |