/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/util.h"

// Standard library facilities used directly in this header.
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <map>
#include <set>

#ifdef _MSC_VER
#include <intrin.h>
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or the size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  TYPE_NULL = 0,
  TYPE_INT = 1,
  TYPE_UINT = 2,
  TYPE_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  TYPE_KEY = 4,
  TYPE_STRING = 5,
  TYPE_INDIRECT_INT = 6,
  TYPE_INDIRECT_UINT = 7,
  TYPE_INDIRECT_FLOAT = 8,
  TYPE_MAP = 9,
  TYPE_VECTOR = 10,        // Untyped.
  TYPE_VECTOR_INT = 11,    // Typed any size (stores no type table).
  TYPE_VECTOR_UINT = 12,
  TYPE_VECTOR_FLOAT = 13,
  TYPE_VECTOR_KEY = 14,
  TYPE_VECTOR_STRING = 15,
  TYPE_VECTOR_INT2 = 16,   // Typed tuple (no type table, no size field).
  TYPE_VECTOR_UINT2 = 17,
  TYPE_VECTOR_FLOAT2 = 18,
  TYPE_VECTOR_INT3 = 19,   // Typed triple (no type table, no size field).
  TYPE_VECTOR_UINT3 = 20,
  TYPE_VECTOR_FLOAT3 = 21,
  TYPE_VECTOR_INT4 = 22,   // Typed quad (no type table, no size field).
  TYPE_VECTOR_UINT4 = 23,
  TYPE_VECTOR_FLOAT4 = 24,
  TYPE_BLOB = 25,
};
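
// For example, a packed type byte of 0x29 (see PackedType() below) decodes as
// bit width 0x29 & 3 == 1 (BIT_WIDTH_16) and type 0x29 >> 2 == 10 (TYPE_VECTOR).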

inline bool IsInline(Type t) { return t <= TYPE_FLOAT; }

inline bool IsTypedVectorElementType(Type t) {
  return t >= TYPE_INT && t <= TYPE_STRING;
}

inline bool IsTypedVector(Type t) {
  return t >= TYPE_VECTOR_INT && t <= TYPE_VECTOR_STRING;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= TYPE_VECTOR_INT2 && t <= TYPE_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, int fixed_len = 0) {
  assert(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT);
    case 2: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT2);
    case 3: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT3);
    case 4: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT4);
    default: assert(0); return TYPE_NULL;
  }
}
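// E.g. ToTypedVector(TYPE_FLOAT) == TYPE_VECTOR_FLOAT and
// ToTypedVector(TYPE_INT, 3) == TYPE_VECTOR_INT3.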

inline Type ToTypedVectorElementType(Type t) {
  assert(IsTypedVector(t));
  return static_cast<Type>(t - TYPE_VECTOR_INT + TYPE_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  assert(IsFixedTypedVector(t));
  auto fixed_type = t - TYPE_VECTOR_INT2;
  // 3 types each, starting from length 2.
  *len = static_cast<uint8_t>(fixed_type / 3 + 2);
  return static_cast<Type>(fixed_type % 3 + TYPE_INT);
}
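// E.g. TYPE_VECTOR_FLOAT3 yields TYPE_FLOAT with *len set to 3.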

// TODO: implement proper support for 8/16bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template <typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
    ? (byte_width < 2 ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                      : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
    : (byte_width < 8 ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                      : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(data,
           byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a
  // constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
  #ifdef _MSC_VER
    uint64_t u = 0;
    __movsb(reinterpret_cast<uint8_t *>(&u),
            reinterpret_cast<const uint8_t *>(data), byte_width);
    return flatbuffers::EndianScalar(u);
  #else
    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
             data, byte_width);
  #endif
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
           byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

static BitWidth WidthU(uint64_t u) {
  #define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) { \
    if (!((value) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
  #undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

static BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}

static BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}
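// Worked examples: WidthU(255) == BIT_WIDTH_8, WidthU(256) == BIT_WIDTH_16.
// WidthI() shifts left by one to account for the sign bit, so
// WidthI(127) == BIT_WIDTH_8, WidthI(128) == BIT_WIDTH_16 and
// WidthI(-1) == BIT_WIDTH_8.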

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
    : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Stores size in `byte_width_` bytes before data_ pointer.
class Sized : public Object {
 public:
  Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width) {}
  size_t size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }
};

class String : public Sized {
 public:
  String(const uint8_t *data, uint8_t byte_width)
    : Sized(data, byte_width) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }

  static String EmptyString() {
    static const uint8_t empty_string[] = { 0/*len*/, 0/*terminator*/ };
    return String(empty_string + 1, 1);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};
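
// For example, the string "hi" serialized with a 1-byte size field occupies
// the bytes { 2, 'h', 'i', 0 }, with data_ pointing at 'h' and size() reading
// the length from the byte just before it.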

class Blob : public Sized {
 public:
  Blob(const uint8_t *data, uint8_t byte_width)
    : Sized(data, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0/*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width)
    : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0/*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
    : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0/*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, TYPE_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
    : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0/* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, TYPE_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width)
    : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

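  // A map's data_ points at its values vector; the three map-specific fields
  // (offset to the keys vector, the keys vector's byte width, and the values
  // length) are stored immediately before it, each byte_width_ bytes wide:
  //   [keys offset][keys byte width][length] <data_> values... types...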
  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                         ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       TYPE_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0/*keys_len*/, 0/*keys_offset*/, 1/*keys_width*/, 0/*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const {
    return data_ == EmptyMap().data_;
  }
};

class Reference {
 public:
  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
    : data_(data), parent_width_(parent_width), byte_width_(byte_width),
      type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
    : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == TYPE_NULL; }
  bool IsInt() const { return type_ == TYPE_INT ||
                              type_ == TYPE_INDIRECT_INT; }
  bool IsUInt() const { return type_ == TYPE_UINT ||
                               type_ == TYPE_INDIRECT_UINT; }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const { return type_ == TYPE_FLOAT ||
                                type_ == TYPE_INDIRECT_FLOAT; }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == TYPE_STRING; }
  bool IsKey() const { return type_ == TYPE_KEY; }
  bool IsVector() const { return type_ == TYPE_VECTOR || type_ == TYPE_MAP; }
  bool IsMap() const { return type_ == TYPE_MAP; }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion: truncates floats, attempts to parse strings as a number,
  // and returns the size for vectors/maps. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == TYPE_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
      case TYPE_UINT: return ReadUInt64(data_, parent_width_);
      case TYPE_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
      case TYPE_FLOAT: return static_cast<int64_t>(
                                ReadDouble(data_, parent_width_));
      case TYPE_INDIRECT_FLOAT: return static_cast<int64_t>(
                                         ReadDouble(Indirect(), byte_width_));
      case TYPE_NULL: return 0;
      case TYPE_STRING: return flatbuffers::StringToInt(AsString().c_str());
      case TYPE_VECTOR: return static_cast<int64_t>(AsVector().size());
      default:
      // Convert other things to int.
      return 0;
    }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t  AsInt8()  const { return static_cast<int8_t> (AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == TYPE_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
      case TYPE_INT: return ReadInt64(data_, parent_width_);
      case TYPE_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
      case TYPE_FLOAT: return static_cast<uint64_t>(
                                ReadDouble(data_, parent_width_));
      case TYPE_INDIRECT_FLOAT: return static_cast<uint64_t>(
                                  ReadDouble(Indirect(), byte_width_));
      case TYPE_NULL: return 0;
      case TYPE_STRING: return flatbuffers::StringToUInt(AsString().c_str());
      case TYPE_VECTOR: return static_cast<uint64_t>(AsVector().size());
      default:
      // Convert other things to uint.
      return 0;
    }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t  AsUInt8()  const { return static_cast<uint8_t> (AsUInt64()); }

  double AsDouble() const {
    if (type_ == TYPE_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
      case TYPE_INT: return static_cast<double>(
                              ReadInt64(data_, parent_width_));
      case TYPE_UINT: return static_cast<double>(
                               ReadUInt64(data_, parent_width_));
      case TYPE_INDIRECT_INT: return static_cast<double>(
                                       ReadInt64(Indirect(), byte_width_));
      case TYPE_INDIRECT_UINT: return static_cast<double>(
                                        ReadUInt64(Indirect(), byte_width_));
      case TYPE_NULL: return 0.0;
      case TYPE_STRING: return strtod(AsString().c_str(), nullptr);
      case TYPE_VECTOR: return static_cast<double>(AsVector().size());
      default:
      // Convert other things to double.
      return 0;
    }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == TYPE_KEY) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something that
  // is not a string.
  String AsString() const {
    if (type_ == TYPE_STRING) {
      return String(Indirect(), byte_width_);
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    if (type_ == TYPE_STRING) {
      return String(Indirect(), byte_width_).c_str();
    } else if (IsKey()) {
      return AsKey();
    } else if (IsInt()) {
      return flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      return flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      return flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      return "null";
    } else if (IsMap()) {
      return "{..}";  // TODO: show elements.
    } else if (IsVector()) {
      return "[..]";  // TODO: show elements.
    } else {
      return "(?)";
    }
  }

  // This function returns the empty blob if you try to read something that is
  // not a blob. Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == TYPE_BLOB || type_ == TYPE_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read something that
  // is not a vector. Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == TYPE_VECTOR || type_ == TYPE_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector(type_)) {
      return TypedVector(Indirect(), byte_width_,
                         ToTypedVectorElementType(type_));
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector(type_)) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == TYPE_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
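  //
  // A minimal mutation sketch (`buf` and `size` name a previously serialized
  // buffer and are hypothetical here):
  //   auto root = flexbuffers::GetRoot(buf, size).AsMap();
  //   bool ok = root["x"].MutateInt(42);  // false if 42 needs more bytes
  //                                       // than were originally stored.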
  bool MutateInt(int64_t i) {
    if (type_ == TYPE_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == TYPE_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == TYPE_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == TYPE_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == TYPE_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == TYPE_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == TYPE_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == TYPE_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == TYPE_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == TYPE_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == TYPE_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == TYPE_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) {
    return MutateString(str, strlen(str));
  }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T> bool Mutate(const uint8_t *dest, T t, size_t byte_width,
                                   BitWidth value_width) {
    auto fits = (1U << value_width) <= byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T> bool MutateF(const uint8_t *dest, T t, size_t byte_width,
                                    BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    assert(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() {
  return PackedType(BIT_WIDTH_8, TYPE_NULL);
}

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
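// E.g. indexing past the end and then calling AsInt64() on the result simply
// yields 0.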
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

663
664template<typename T> int KeyCompare(const void *key, const void *elem) {
665  auto str_elem = reinterpret_cast<const char *>(
666                    Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
667  auto skey = reinterpret_cast<const char *>(key);
668  return strcmp(skey, str_elem);
669}
670
671inline Reference Map::operator[](const char *key) const {
672  auto keys = Keys();
673  // We can't pass keys.byte_width_ to the comparison function, so we have
674  // to pick the right one ahead of time.
675  int (*comp)(const void *, const void *) = nullptr;
676  switch (keys.byte_width_) {
677    case 1: comp = KeyCompare<uint8_t>; break;
678    case 2: comp = KeyCompare<uint16_t>; break;
679    case 4: comp = KeyCompare<uint32_t>; break;
680    case 8: comp = KeyCompare<uint64_t>; break;
681  }
682  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
683  if (!res)
684    return Reference(nullptr, 1, NullPackedType());
685  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
686  return (*static_cast<const Vector *>(this))[i];
687}
688
689inline Reference Map::operator[](const std::string &key) const {
690  return (*this)[key.c_str()];
691}
692
693inline Reference GetRoot(const uint8_t *buffer, size_t size) {
694  // See Finish() below for the serialization counterpart of this.
695  // The root starts at the end of the buffer, so we parse backwards from there.
696  auto end = buffer + size;
697  auto byte_width = *--end;
698  auto packed_type = *--end;
699  end -= byte_width;  // The root data item.
700  return Reference(end, byte_width, packed_type);
701}
702
703inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
704  return GetRoot(buffer.data(), buffer.size());
705}
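
// A minimal read-side sketch (assumes `buf`/`size` hold a FlexBuffer whose
// root is a map with an integer field named "x"):
//   auto map = flexbuffers::GetRoot(buf, size).AsMap();
//   auto x = map["x"].AsInt64();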

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};
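
// A minimal write-side sketch using the Builder below:
//   flexbuffers::Builder fbb;
//   fbb.Map([&]() {
//     fbb.Int("x", 1);
//     fbb.String("name", "hi");
//   });
//   fbb.Finish();
//   const std::vector<uint8_t> &buf = fbb.GetBuffer();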

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size), finished_(false), flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8), key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) { Key(key); Null(); }

  void Int(int64_t i) { stack_.push_back(Value(i, TYPE_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) { Key(key); Int(i); }

  void UInt(uint64_t u) { stack_.push_back(Value(u, TYPE_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) { Key(key); UInt(u); }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) { Key(key); Float(f); }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) { Key(key); Double(d); }

  void Bool(bool b) { Int(static_cast<int64_t>(b)); }
  void Bool(const char *key, bool b) { Key(key); Bool(b); }

  void IndirectInt(int64_t i) {
    PushIndirect(i, TYPE_INDIRECT_INT, WidthI(i));
  }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, TYPE_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, TYPE_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, TYPE_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), TYPE_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, TYPE_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) {
    return String(str, strlen(str));
  }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, TYPE_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(v.data(), v.size(), 0, TYPE_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) { Key(key); return stack_.size(); }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) { Key(key); return stack_.size(); }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    assert(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      assert(stack_[key].type_ == TYPE_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue { Value key; Value val; };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added which would be
    // quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict = reinterpret_cast<TwoValue *>(stack_.data() + start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
      auto as = reinterpret_cast<const char *>(buf_.data() + a.key.u_);
      auto bs = reinterpret_cast<const char *>(buf_.data() + b.key.u_);
      auto comp = strcmp(as, bs);
      // If this assertion hits, you've added two keys with the same value to
      // this map.
      // TODO: Have to check for pointer equality, as some sort implementations
      // apparently call this function with the same element?? Why?
      assert(comp || &a == &b);
      return comp < 0;
    });
    // First create a vector out of all keys.
    // TODO(wvo): if BUILDER_FLAG_SHARE_KEY_VECTORS is set, see if we can
    // share the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename T> void Vector(const T *elems, size_t len) {
    if (std::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T> void Vector(const char *key, const T *elems,
                                   size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(vec.data(), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. For anything bigger, use a
    // regular typed vector.
    assert(len >= 2 && len <= 4);
    // And only scalar values.
    assert(std::is_scalar<T>::value);
    return ScalarVector(elems, len, true);
  }

  template<typename T> size_t FixedTypedVector(const char *key, const T *elems,
                                               size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) {
    Vector(vec);
  }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) {
    Add(t);
  }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
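  //
  // For example (a sketch, with `fbb` a Builder), to keep vector elements
  // mutable to any 32-bit value:
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
  //   fbb.Vector([&]() { fbb.Int(1); fbb.Int(2); });  // elements 4 bytes wide
  //   fbb.ForceMinimumBitWidth();  // back to the default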
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    assert(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    assert(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return byte_width;
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(),
                reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  // For values where sizeof(T) >= byte_width.
  template<typename T> void Write(T val, uint8_t byte_width) {
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      //case 2: Write(static_cast<half>(f), byte_width); break;
      //case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: assert(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    assert(reloff < 1ULL << (byte_width * 8) || byte_width == 8);
    Write(reloff, byte_width);
  }

  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: assert(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    assert(std::is_scalar<T>::value);
    return std::is_floating_point<T>::value
        ? TYPE_FLOAT
        : (std::is_unsigned<T>::value ? TYPE_UINT : TYPE_INT);
  }

  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(TYPE_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
      : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
      : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
      : f_(f), type_(TYPE_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
    Value(double f)
      : f_(f), type_(TYPE_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc =
            buf_size +
            flatbuffers::PaddingBytes(buf_size, byte_width) +
            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (1U << bit_width == byte_width) return bit_width;
        }
        assert(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return std::max(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case TYPE_NULL:
      case TYPE_INT:
        Write(val.i_, byte_width);
        break;
      case TYPE_UINT:
        Write(val.u_, byte_width);
        break;
      case TYPE_FLOAT:
        WriteDouble(val.f_, byte_width);
        break;
      default:
        WriteOffset(val.u_, byte_width);
        break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T> size_t ScalarVector(const T *elems, size_t len,
                                           bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    assert(WidthU(len) <= bit_width);
    if (!fixed) Write(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    // Figure out smallest bit width we can store this vector with.
    auto bit_width = std::max(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will pre-fix an offset to the keys
      // to this vector.
      bit_width = std::max(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = TYPE_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems);
      bit_width = std::max(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          assert(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    assert(IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write(1U << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc), keys
                         ? TYPE_MAP
                         : (typed
                            ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                            : TYPE_VECTOR),
                       bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator() (size_t a, size_t b) const {
      auto stra = reinterpret_cast<const char *>(buf_->data() + a);
      auto strb = reinterpret_cast<const char *>(buf_->data() + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    StringOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator() (const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(buf_->data() + a.first);
      auto strb = reinterpret_cast<const char *>(buf_->data() + b.first);
      return strncmp(stra, strb, std::min(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#endif  // FLATBUFFERS_FLEXBUFFERS_H_