/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  FBT_NULL = 0,
  FBT_INT = 1,
  FBT_UINT = 2,
  FBT_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  FBT_KEY = 4,
  FBT_STRING = 5,
  FBT_INDIRECT_INT = 6,
  FBT_INDIRECT_UINT = 7,
  FBT_INDIRECT_FLOAT = 8,
  FBT_MAP = 9,
  FBT_VECTOR = 10,      // Untyped.
  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
  FBT_VECTOR_UINT = 12,
  FBT_VECTOR_FLOAT = 13,
  FBT_VECTOR_KEY = 14,
  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
  FBT_VECTOR_STRING_DEPRECATED = 15,
  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  FBT_VECTOR_UINT2 = 17,
  FBT_VECTOR_FLOAT2 = 18,
  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  FBT_VECTOR_UINT3 = 20,
  FBT_VECTOR_FLOAT3 = 21,
  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  FBT_VECTOR_UINT4 = 23,
  FBT_VECTOR_FLOAT4 = 24,
  FBT_BLOB = 25,
  FBT_BOOL = 26,
  FBT_VECTOR_BOOL =
      36,  // To allow the same type of conversion of type to vector type.
};

inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
         t == FBT_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
  }
}

inline Type ToTypedVectorElementType(Type t) {
  FLATBUFFERS_ASSERT(IsTypedVector(t));
  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
  auto fixed_type = t - FBT_VECTOR_INT2;
  *len = static_cast<uint8_t>(fixed_type / 3 +
                              2);  // 3 types each, starting from length 2.
  return static_cast<Type>(fixed_type % 3 + FBT_INT);
}
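
// For illustration: FBT_VECTOR_UINT3 (20) gives fixed_type = 20 - 16 = 4,
// so *len = 4 / 3 + 2 = 3 and the element type is 4 % 3 + FBT_INT = FBT_UINT.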

// TODO: implement proper support for 8/16bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template<typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
             ? (byte_width < 2
                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
             : (byte_width < 8
                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is
  // a constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
  // clang-format off
  #if defined(_MSC_VER) && ((defined(_M_X64) && !defined(_M_ARM64EC)) || defined(_M_IX86))
    uint64_t u = 0;
    __movsb(reinterpret_cast<uint8_t *>(&u),
            reinterpret_cast<const uint8_t *>(data), byte_width);
    return flatbuffers::EndianScalar(u);
  #else
    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
        data, byte_width);
  #endif
  // clang-format on
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
  {                                                                     \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}
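
// The shift above makes room for the sign bit. For example, WidthI(127) maps
// to u = 254 and returns BIT_WIDTH_8, while WidthI(128) maps to u = 256 and
// returns BIT_WIDTH_16 (128 doesn't fit an int8_t once the sign is accounted
// for). For negatives, ~u turns e.g. -1 into 1, so WidthI(-1) is BIT_WIDTH_8.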

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                          : BIT_WIDTH_64;
}
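
// For example, WidthF(1.5) is BIT_WIDTH_32 since 1.5 round-trips through a
// float exactly, whereas WidthF(0.1) is BIT_WIDTH_64 because 0.1 loses
// precision when narrowed to float.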

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
      : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Object that has a size, obtained either from size prefix, or elsewhere.
class Sized : public Object {
 public:
  // Size prefix.
  Sized(const uint8_t *data, uint8_t byte_width)
      : Object(data, byte_width), size_(read_size()) {}
  // Manual size.
  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Object(data, byte_width), size_(sz) {}
  size_t size() const { return size_; }
  // Access size stored in `byte_width_` bytes before data_ pointer.
  size_t read_size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }

 protected:
  size_t size_;
};

class String : public Sized {
 public:
  // Size prefix.
  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
  // Manual size.
  String(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Sized(data, byte_width, sz) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), size()); }

  static String EmptyString() {
    static const char *empty_string = "";
    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};
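
// Note on String layout: with a byte_width of 1 a string is stored as
// [len][bytes...][0], with data_ pointing at the first byte. The size prefix
// lives at data_ - 1 and the contents are also readable as a C string.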

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
      : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0 /*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0 /*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
      : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

  friend Reference;

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
      : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       FBT_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
};
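
// Note on Map layout: the values vector at data_ is prefixed (at negative
// offsets from data_) by the element count, the byte width of the keys
// vector, and an offset to that keys vector; Keys() above reads the latter
// two of those three prefixed fields.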

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted) {
  s += "[ ";
  for (size_t i = 0; i < v.size(); i++) {
    if (i) s += ", ";
    v[i].ToString(true, keys_quoted, s);
  }
  s += " ]";
}

class Reference {
 public:
  Reference()
      : data_(nullptr),
        parent_width_(0),
        byte_width_(BIT_WIDTH_8),
        type_(FBT_NULL) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(byte_width),
        type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
      : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const {
    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
  }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const {
    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
  }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  bool IsFixedTypedVector() const {
    return flexbuffers::IsFixedTypedVector(type_);
  }
  bool IsAnyVector() const {
    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
  }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }
  bool AsBool() const {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
                              : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion: floats are truncated, strings are parsed for a number where
  // possible, vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == FBT_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_UINT: return ReadUInt64(data_, parent_width_);
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
        case FBT_BOOL: return ReadInt64(data_, parent_width_);
        default:
          // Convert other things to int.
          return 0;
      }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == FBT_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_INT: return ReadInt64(data_, parent_width_);
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
        default:
          // Convert other things to uint.
          return 0;
      }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }

  double AsDouble() const {
    if (type_ == FBT_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
        case FBT_INT:
          return static_cast<double>(ReadInt64(data_, parent_width_));
        case FBT_UINT:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        case FBT_INDIRECT_INT:
          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
        case FBT_INDIRECT_UINT:
          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
        case FBT_NULL: return 0.0;
        case FBT_STRING: {
          double d;
          flatbuffers::StringToNumber(AsString().c_str(), &d);
          return d;
        }
        case FBT_VECTOR: return static_cast<double>(AsVector().size());
        case FBT_BOOL:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        default:
          // Convert strings and other things to float.
          return 0;
      }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == FBT_KEY || type_ == FBT_STRING) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something that
  // is not a string or key.
  String AsString() const {
    if (type_ == FBT_STRING) {
      return String(Indirect(), byte_width_);
    } else if (type_ == FBT_KEY) {
      auto key = Indirect();
      return String(key, byte_width_,
                    strlen(reinterpret_cast<const char *>(key)));
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    std::string s;
    ToString(false, false, s);
    return s;
  }

  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  // TODO(wvo): add further options to have indentation/newlines.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    if (type_ == FBT_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{ ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        keys[i].ToString(true, keys_quoted, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s);
        if (i < keys.size() - 1) s += ", ";
      }
      s += " }";
    } else if (IsVector()) {
      AppendToString<Vector>(s, AsVector(), keys_quoted);
    } else if (IsTypedVector()) {
      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
    } else if (IsFixedTypedVector()) {
      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
    } else if (IsBlob()) {
      auto blob = AsBlob();
      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
                                blob.size(), &s, true, false);
    } else {
      s += "(?)";
    }
  }

  // This function returns the empty blob if you try to read a not-blob.
  // Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read a not-vector.
  // Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector()) {
      auto tv = TypedVector(Indirect(), byte_width_,
                            ToTypedVectorElementType(type_));
      if (tv.type_ == FBT_STRING) {
        // These can't be accessed as strings, since we don't know the
        // bit-width of the size field, see the declaration of
        // FBT_VECTOR_STRING_DEPRECATED above for details.
        // We change the type here to be keys, which are a subtype of strings,
        // and will ignore the size field. This will truncate strings with
        // embedded nulls.
        tv.type_ = FBT_KEY;
      }
      return tv;
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector()) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == FBT_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As() const;

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
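  // For example (buffer and key names here are purely illustrative):
  //   auto root = flexbuffers::GetRoot(buf, size);
  //   root.AsMap()["counter"].MutateInt(100000);  // Returns false if the
  //                                               // stored int is narrower
  //                                               // than 32 bits.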
  bool MutateInt(int64_t i) {
    if (type_ == FBT_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == FBT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == FBT_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == FBT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
              BitWidth value_width) {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
                byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
               BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

// Template specialization for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}
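
// For example, PackedType(BIT_WIDTH_16, FBT_INT) == (FBT_INT << 2) | 1 == 5:
// the low 2 bits carry the width (1 == 16 bit) and the high 6 bits the type.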

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from
  // there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
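
// Reading example (illustrative; the field name "x" is an assumption, not
// part of the API): given a finished buffer in a std::vector<uint8_t> buf,
//   auto root = flexbuffers::GetRoot(buf);
//   auto x = root.AsMap()["x"].AsInt32();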

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};

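// Typical usage (illustrative; the field names are examples only):
//   flexbuffers::Builder fbb;
//   fbb.Map([&]() {
//     fbb.Int("x", 13);
//     fbb.String("name", "hello");
//   });
//   fbb.Finish();
//   auto &buf = fbb.GetBuffer();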
class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        has_duplicate_keys_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

#ifdef FLATBUFFERS_DEFAULT_DECLARATION
  Builder(Builder &&) = default;
  Builder &operator=(Builder &&) = default;
#endif

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is.
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would be
    // quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict =
        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
                auto as = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + a.key.u_);
                auto bs = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + b.key.u_);
                auto comp = strcmp(as, bs);
                // We want to disallow duplicate keys, since this results in a
                // map where values cannot be found.
                // But we can't assert here (since we don't want to fail on
                // random JSON input) or have an error mechanism.
                // Instead, we set has_duplicate_keys_ in the builder to
                // signal this.
                // TODO: Have to check for pointer equality, as some sort
                // implementations apparently call this function with the same
                // element?? Why?
                if (!comp && &a != &b) has_duplicate_keys_ = true;
                return comp < 0;
              });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  // Call this after EndMap to see if the map had any duplicate keys.
  // Any map with such keys won't be able to retrieve all values.
  bool HasDuplicateKeys() const { return has_duplicate_keys_; }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. Anything bigger should use
    // a regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  // If you wish to share a value explicitly (a value not shared automatically
  // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
  // functions. Or if you wish to turn those flags off for performance reasons
  // and still do some explicit sharing. For example:
  //   builder.IndirectDouble(M_PI);
  //   auto id = builder.LastValue();  // Remember where we stored it.
  //   .. more code goes here ..
  //   builder.ReuseValue(id);  // Refers to same double by offset.
  // LastValue works regardless of whether the value has a key or not.
  // Works on any data type.
  struct Value;
  Value LastValue() { return stack_.back(); }
  void ReuseValue(Value v) { stack_.push_back(v); }
  void ReuseValue(const char *key, Value v) {
    Key(key);
    ReuseValue(v);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) { Add(t); }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }
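
  // For example (illustrative), to build a vector whose elements can later
  // be overwritten with any 32-bit value:
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
  //   fbb.Vector([&]() { fbb.Int(1); fbb.Int(2); });
  //   fbb.ForceMinimumBitWidth();  // Reset to the default.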

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return static_cast<uint8_t>(byte_width);
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      // case 2: Write(static_cast<half>(f), byte_width); break;
      // case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: FLATBUFFERS_ASSERT(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }

  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return flatbuffers::is_floating_point<T>::value
               ? FBT_FLOAT
               : flatbuffers::is_same<T, bool>::value
                     ? FBT_BOOL
                     : (flatbuffers::is_unsigned<T>::value ? FBT_UINT
                                                           : FBT_INT);
  }

 public:
  // This was really intended to be private, except for LastValue/ReuseValue.
  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
        : f_(static_cast<double>(f)),
          type_(FBT_FLOAT),
          min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
              byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

 private:
  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    FLATBUFFERS_ASSERT(
        !fixed ||
        typed);  // typed=false, fixed=true combination is not supported.
    // Figure out smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will pre-fix an offset to the keys
      // to this vector.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width =
          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;
  bool has_duplicate_keys_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
      auto strb =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + b.first);
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_