inlined_vector.h revision fc0235e04a040f039f9caf5d8e28ee0db0b8abfd
/* Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// An InlinedVector<T,N,A> is like a std::vector<T,A>, except that storage
// for sequences of length <= N is provided inline without requiring
// any heap allocation.  Typically N is very small (e.g., 4) so that
// sequences that are expected to be short do not require allocations.
//
// Only some of the std::vector<> operations are currently implemented.
// Other operations may be added as needed to facilitate migrating
// code that uses std::vector<> to InlinedVector<>.
//
// NOTE: If you want an inlined version to replace use of a
// std::vector<bool>, consider using util::bitmap::InlinedBitVector<NBITS>
// in util/bitmap/inlined_bitvector.h
//
// TODO(billydonahue): change size_t to size_type where appropriate.
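//
// Example usage (an illustrative sketch, not part of the original
// documentation):
//
//   tensorflow::gtl::InlinedVector<int, 4> v;
//   v.push_back(1);  // elements 1..4 live in the inline buffer
//   v.push_back(2);
//   v.push_back(3);
//   v.push_back(4);
//   v.push_back(5);  // the fifth element forces a heap allocation
//   CHECK_EQ(v.size(), 5);
//   CHECK_EQ(v[4], 5);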

#ifndef TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
#define TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <algorithm>
#include <iterator>
#include <memory>
#include <type_traits>
#include <vector>

#include "tensorflow/core/lib/gtl/manual_constructor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"

#include <initializer_list>  // NOLINT(build/include_order)

namespace tensorflow {
namespace gtl {

template <typename T, int N, typename A = std::allocator<T> >
class InlinedVector {
 public:
  typedef A allocator_type;
  typedef typename allocator_type::value_type value_type;
  typedef typename allocator_type::pointer pointer;
  typedef typename allocator_type::const_pointer const_pointer;
  typedef typename allocator_type::reference reference;
  typedef typename allocator_type::const_reference const_reference;
  typedef typename allocator_type::size_type size_type;
  typedef typename allocator_type::difference_type difference_type;
  typedef pointer iterator;
  typedef const_pointer const_iterator;

  // Create an empty vector
  InlinedVector();
  explicit InlinedVector(const allocator_type& alloc);

  // Create a vector with n copies of value_type().
  explicit InlinedVector(size_t n);

  // Create a vector with n copies of elem
  InlinedVector(size_t n, const value_type& elem,
                const allocator_type& alloc = allocator_type());

  // Create and initialize with the elements [range_start .. range_end).
  // The unused enable_if argument restricts this constructor so that it is
  // elided when InputIterator is an integral type.  This prevents ambiguous
  // interpretation between a call to this constructor with two integral
  // arguments and a call to the preceding (n, elem) constructor.
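  //
  // For example (illustrative, not from the original comment):
  // InlinedVector<int, 4> v(3, 42) passes two ints, which must resolve to
  // the (n, elem) constructor above rather than to this range constructor.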
  template <typename InputIterator>
  InlinedVector(
      InputIterator range_start, InputIterator range_end,
      const allocator_type& alloc = allocator_type(),
      typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
          NULL)
      : allocator_and_tag_(alloc) {
    AppendRange(range_start, range_end);
  }

  InlinedVector(std::initializer_list<value_type> init,
                const allocator_type& alloc = allocator_type())
      : allocator_and_tag_(alloc) {
    AppendRange(init.begin(), init.end());
  }

  InlinedVector(const InlinedVector& v);

  ~InlinedVector() { clear(); }

  InlinedVector& operator=(const InlinedVector& v) {
    // Optimized to avoid reallocation.
    // Prefer reassignment to copy construction for elements.
    if (size() < v.size()) {  // grow
      reserve(v.size());
      std::copy(v.begin(), v.begin() + size(), begin());
      std::copy(v.begin() + size(), v.end(), std::back_inserter(*this));
    } else {  // maybe shrink
      erase(begin() + v.size(), end());
      std::copy(v.begin(), v.end(), begin());
    }
    return *this;
  }

  size_t size() const {
    return allocated() ? allocation().size() : tag().size();
  }

  bool empty() const { return (size() == 0); }

  // Return number of elements that can be stored in vector
  // without requiring a reallocation of underlying memory
  size_t capacity() const { return allocated() ? allocation().capacity() : N; }

  // Return a pointer to the underlying array.
  // Only result[0,size()-1] are defined.
  const_pointer data() const {
    return allocated() ? allocated_space() : inlined_space();
  }
  pointer data() { return allocated() ? allocated_space() : inlined_space(); }

  // An older name for the more standard-friendly .data().
  const_pointer array() const { return data(); }
  pointer mutable_array() { return data(); }

  // Remove all elements
  void clear() {
    size_t s = size();
    if (allocated()) {
      DestroyAllocated(allocated_space(), allocated_space() + s);
      allocation().Dealloc(allocator());
    } else {
      DestroyInlined(inlined_space(), inlined_space() + s);
    }
    tag() = Tag();
  }

  // Return the ith element
  // REQUIRES: 0 <= i < size()
  const value_type& at(size_t i) const {
    DCHECK_LT(i, size());
    return array()[i];
  }
  const value_type& operator[](size_t i) const {
    DCHECK_LT(i, size());
    return array()[i];
  }

  // Return a non-const reference to the ith element
  // REQUIRES: 0 <= i < size()
  value_type& at(size_t i) {
    DCHECK_LT(i, size());
    return mutable_array()[i];
  }
  value_type& operator[](size_t i) {
    DCHECK_LT(i, size());
    return mutable_array()[i];
  }

  value_type& back() {
    DCHECK(!empty());
    return at(size() - 1);
  }

  const value_type& back() const {
    DCHECK(!empty());
    return at(size() - 1);
  }

  value_type& front() {
    DCHECK(!empty());
    return at(0);
  }

  const value_type& front() const {
    DCHECK(!empty());
    return at(0);
  }

  // Append t to the vector.
  // Increases size() by one.
  // Amortized complexity: O(1)
  // Worst-case complexity: O(size())
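  // (Illustrative note, not in the original header: the amortized O(1)
  // bound follows from GrowAndPushBack doubling the capacity, so a
  // reallocation that copies s elements pays for roughly s further pushes.)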
  void push_back(const value_type& t) {
    size_t s = size();
    DCHECK_LE(s, capacity());
    if (s == capacity()) {
      return GrowAndPushBack(t);
    }
    DCHECK_LT(s, capacity());

    if (allocated()) {
      ConstructAllocated(allocated_space() + s, t);
    } else {
      ConstructInlined(inlined_space() + s, t);
    }

    set_size_internal(s + 1);
  }

  void pop_back() {
    DCHECK(!empty());
    size_t s = size();
    if (allocated()) {
      DestroyAllocated(allocated_space() + s - 1, allocated_space() + s);
    } else {
      DestroyInlined(inlined_space() + s - 1, inlined_space() + s);
    }
    set_size_internal(s - 1);
  }

  // Resizes the vector to contain "n" elements.
  // If "n" is smaller than the initial size, extra elements are destroyed.
  // If "n" is larger than the initial size, enough copies of "elem"
  // are appended to increase the size to "n". If "elem" is omitted,
  // new elements are value-initialized.
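  //
  // For example (illustrative): given InlinedVector<int, 4> v(2, 7),
  // v.resize(5) appends three value-initialized ints, v.resize(5, 9)
  // appends three copies of 9, and v.resize(1) destroys the trailing
  // elements.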
  void resize(size_t n);
  void resize(size_t n, const value_type& elem);

  iterator begin() { return mutable_array(); }
  const_iterator begin() const { return array(); }

  iterator end() { return mutable_array() + size(); }
  const_iterator end() const { return array() + size(); }

  iterator insert(iterator pos, const value_type& v);

  iterator erase(iterator pos) {
    DCHECK_LT(pos, end());
    DCHECK_GE(pos, begin());
    std::copy(pos + 1, end(), pos);
    pop_back();
    return pos;
  }

  iterator erase(iterator first, iterator last);

  // Enlarges the underlying representation so it can hold at least
  // "n" elements without reallocation.
  // Does not change size() or the actual contents of the vector.
  void reserve(size_t n) {
    if (n > capacity()) {
      // Make room for new elements
      EnlargeBy(n - size());
    }
  }

  // Swap the contents of *this with other.
  // REQUIRES: value_type is swappable and copyable.
  void swap(InlinedVector& other);

  allocator_type get_allocator() const { return allocator(); }

 private:
  struct AllocatorTraits {
    typedef typename allocator_type::value_type value_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::size_type size_type;

    static void construct(allocator_type& a,  // NOLINT(runtime/references)
                          pointer p) {
      // Tricky: do we support non-copyable types, or support allocators
      // that do special things with construct()? Non-copyable types are
      // needed today, so they are more important. When we sort out the
      // Android NDK C++11 problem, we will be able to use the proper
      // std::allocator_traits<A>::construct(p, ...).
      //
      // a.construct(p, value_type());
      new (p) value_type();
    }
    static void construct(allocator_type& a,  // NOLINT(runtime/references)
                          pointer p, const value_type& t) {
      a.construct(p, t);
    }
    static void destroy(allocator_type& a,  // NOLINT(runtime/references)
                        pointer p) {
      a.destroy(p);
    }
    static pointer allocate(allocator_type& a,  // NOLINT(runtime/references)
                            size_type n) {
      return a.allocate(n);
    }
    static void deallocate(allocator_type& a,  // NOLINT(runtime/references)
                           pointer p, size_type n) {
      a.deallocate(p, n);
    }
  };

  // If the vector is inlined, holds the size of the vector.
  // If the vector is allocated, holds the special value kAllocated,
  // and the size is stored in the vector's Allocation.
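  //
  // For example (illustrative): an InlinedVector<int, 4> holding three
  // inline elements has tag().size() == 3; once it spills to the heap, the
  // tag stores kAllocated and the element count lives in the Allocation.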
  class Tag {
   public:
    Tag() : size_(0) {}
    size_t size() const { return size_; }
    void set_size(size_t n) { size_ = n; }
    bool allocated() const { return size_ == kAllocated; }
    void set_allocated() { size_ = kAllocated; }

   private:
    static const size_t kAllocated = -1;
    size_t size_;
  };

  // Derives from allocator_type to use the empty base class optimization.
  // If the allocator_type is stateless, we can 'store'
  // our instance of it for free.
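  // With a stateless allocator such as std::allocator<T>, the following
  // would typically hold (illustrative only; not asserted by this header):
  //   static_assert(sizeof(AllocatorAndTag) == sizeof(Tag),
  //                 "stateless allocator adds no storage overhead");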
  class AllocatorAndTag : private allocator_type {
   public:
    explicit AllocatorAndTag(const allocator_type& a, Tag t = Tag())
        : allocator_type(a), tag_(t) {}
    Tag& tag() { return tag_; }
    const Tag& tag() const { return tag_; }
    allocator_type& allocator() { return *this; }
    const allocator_type& allocator() const { return *this; }

   private:
    Tag tag_;
  };

  class Allocation {
   public:
    Allocation(allocator_type& a,  // NOLINT(runtime/references)
               size_t capacity)
        : size_(0),
          capacity_(capacity),
          buffer_(AllocatorTraits::allocate(a, capacity_)) {}

    void Dealloc(allocator_type& a) {  // NOLINT(runtime/references)
      AllocatorTraits::deallocate(a, buffer(), capacity());
    }

    size_t size() const { return size_; }
    void set_size(size_t s) { size_ = s; }
    size_t capacity() const { return capacity_; }
    const value_type* buffer() const { return buffer_; }
    value_type* buffer() { return buffer_; }

   private:
    size_t size_;
    size_t capacity_;
    value_type* buffer_;
  };

  const Tag& tag() const { return allocator_and_tag_.tag(); }
  Tag& tag() { return allocator_and_tag_.tag(); }

  Allocation& allocation() { return *rep_.allocation_storage.allocation.get(); }
  const Allocation& allocation() const {
    return *rep_.allocation_storage.allocation.get();
  }
  void init_allocation(const Allocation& allocation) {
    rep_.allocation_storage.allocation.Init(allocation);
  }

  value_type* inlined_space() { return rep_.inlined_storage.inlined[0].get(); }
  const value_type* inlined_space() const {
    return rep_.inlined_storage.inlined[0].get();
  }

  value_type* allocated_space() { return allocation().buffer(); }
  const value_type* allocated_space() const { return allocation().buffer(); }

  const allocator_type& allocator() const {
    return allocator_and_tag_.allocator();
  }
  allocator_type& allocator() { return allocator_and_tag_.allocator(); }

  bool allocated() const { return tag().allocated(); }
  void set_allocated() { return tag().set_allocated(); }

  void set_size_internal(size_t n) {
    if (allocated()) {
      allocation().set_size(n);
    } else {
      tag().set_size(n);
    }
  }

  // Enlarge the underlying representation so we can store size_ + delta elems.
  // The size is not changed, and any newly added memory is not initialized.
  void EnlargeBy(size_t delta);

  void ResetAllocation(Allocation new_allocation) {
    if (allocated()) {
      DestroyAllocated(allocated_space(), allocated_space() + size());
      DCHECK_EQ(begin(), allocated_space());
      allocation().Dealloc(allocator());
      allocation() = new_allocation;
    } else {
      DestroyInlined(inlined_space(), inlined_space() + size());
      init_allocation(new_allocation);  // bug: only init once
      set_allocated();
    }
  }

  void GrowAndPushBack(const value_type& t) {
    DCHECK_EQ(size(), capacity());
    const size_t s = size();

    Allocation new_allocation(allocator(), 2 * capacity());
    new_allocation.set_size(s + 1);

    UninitializedCopyAllocated(array(), array() + s, new_allocation.buffer());
    ConstructAllocated(new_allocation.buffer() + s, t);

    ResetAllocation(new_allocation);
  }

  void InitAssign(size_t n);
  void InitAssign(size_t n, const value_type& t);

  void ConstructInlined(pointer p) { new (p) value_type(); }

  void ConstructInlined(pointer p, const value_type& t) {
    new (p) value_type(t);
  }

  void ConstructAllocated(pointer p) {
    AllocatorTraits::construct(allocator(), p);
  }
  void ConstructAllocated(pointer p, const value_type& t) {
    AllocatorTraits::construct(allocator(), p, t);
  }

  template <typename Iter>
  void UninitializedCopyInlined(Iter src, Iter src_last, value_type* dst) {
    std::uninitialized_copy(src, src_last, dst);
  }

  template <typename Iter>
  void UninitializedCopyAllocated(Iter src, Iter src_last, value_type* dst) {
    for (; src != src_last; ++dst, ++src) ConstructAllocated(dst, *src);
  }

  void UninitializedFillInlined(value_type* dst, value_type* dst_last) {
    for (; dst != dst_last; ++dst) ConstructInlined(dst);
  }
  void UninitializedFillInlined(value_type* dst, value_type* dst_last,
                                const value_type& t) {
    std::uninitialized_fill(dst, dst_last, t);
  }

  void UninitializedFillAllocated(value_type* dst, value_type* dst_last) {
    for (; dst != dst_last; ++dst) ConstructAllocated(dst);
  }
  void UninitializedFillAllocated(value_type* dst, value_type* dst_last,
                                  const value_type& t) {
    for (; dst != dst_last; ++dst) ConstructAllocated(dst, t);
  }

  // Destroy [ptr, ptr_last) in place.
  void DestroyInlined(value_type* ptr, value_type* ptr_last);
  void DestroyAllocated(value_type* ptr, value_type* ptr_last);

  template <typename Iter>
  void AppendRange(Iter first, Iter last, std::input_iterator_tag);

  // Faster path for forward iterators.
  template <typename Iter>
  void AppendRange(Iter first, Iter last, std::forward_iterator_tag);

  template <typename Iter>
  void AppendRange(Iter first, Iter last);

  AllocatorAndTag allocator_and_tag_;

  // Either the inlined or allocated representation
  union Rep {
    // Use struct to perform indirection that solves a bizarre compilation
    // error on Visual Studio (all known versions).
    struct {
      tensorflow::ManualConstructor<value_type> inlined[N];
    } inlined_storage;
    struct {
      tensorflow::ManualConstructor<Allocation> allocation;
    } allocation_storage;
  } rep_;
};

template <typename T, int N, typename A>
const size_t InlinedVector<T, N, A>::Tag::kAllocated;

template <typename T, int N, typename A>
inline void swap(InlinedVector<T, N, A>& a, InlinedVector<T, N, A>& b) {
  a.swap(b);
}

template <typename T, int N, typename A>
inline bool operator==(const InlinedVector<T, N, A>& a,
                       const InlinedVector<T, N, A>& b) {
  return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
}

template <typename T, int N, typename A>
inline bool operator!=(const InlinedVector<T, N, A>& a,
                       const InlinedVector<T, N, A>& b) {
  return !(a == b);
}

template <typename T, int N, typename A>
inline bool operator<(const InlinedVector<T, N, A>& a,
                      const InlinedVector<T, N, A>& b) {
  return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
}

template <typename T, int N, typename A>
inline bool operator>(const InlinedVector<T, N, A>& a,
                      const InlinedVector<T, N, A>& b) {
  return b < a;
}

template <typename T, int N, typename A>
inline bool operator<=(const InlinedVector<T, N, A>& a,
                       const InlinedVector<T, N, A>& b) {
  return !(b < a);
}

template <typename T, int N, typename A>
inline bool operator>=(const InlinedVector<T, N, A>& a,
                       const InlinedVector<T, N, A>& b) {
  return !(a < b);
}

// ========================================
// Implementation

template <typename T, int N, typename A>
inline InlinedVector<T, N, A>::InlinedVector()
    : allocator_and_tag_(allocator_type()) {}

template <typename T, int N, typename A>
inline InlinedVector<T, N, A>::InlinedVector(const allocator_type& alloc)
    : allocator_and_tag_(alloc) {}

template <typename T, int N, typename A>
inline InlinedVector<T, N, A>::InlinedVector(size_t n)
    : allocator_and_tag_(allocator_type()) {
  InitAssign(n);
}

template <typename T, int N, typename A>
inline InlinedVector<T, N, A>::InlinedVector(size_t n, const value_type& elem,
                                             const allocator_type& alloc)
    : allocator_and_tag_(alloc) {
  InitAssign(n, elem);
}

template <typename T, int N, typename A>
inline InlinedVector<T, N, A>::InlinedVector(const InlinedVector& v)
    : allocator_and_tag_(v.allocator()) {
  reserve(v.size());
  if (allocated()) {
    UninitializedCopyAllocated(v.begin(), v.end(), allocated_space());
  } else {
    UninitializedCopyInlined(v.begin(), v.end(), inlined_space());
  }
  set_size_internal(v.size());
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::InitAssign(size_t n, const value_type& t) {
  if (n > static_cast<size_t>(N)) {
    Allocation new_allocation(allocator(), n);
    init_allocation(new_allocation);
    set_allocated();
    UninitializedFillAllocated(allocated_space(), allocated_space() + n, t);
  } else {
    UninitializedFillInlined(inlined_space(), inlined_space() + n, t);
  }
  set_size_internal(n);
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::InitAssign(size_t n) {
  if (n > static_cast<size_t>(N)) {
    Allocation new_allocation(allocator(), n);
    init_allocation(new_allocation);
    set_allocated();
    UninitializedFillAllocated(allocated_space(), allocated_space() + n);
  } else {
    UninitializedFillInlined(inlined_space(), inlined_space() + n);
  }
  set_size_internal(n);
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::resize(size_t n) {
  size_t s = size();
  if (n < s) {
    erase(begin() + n, end());
    return;
  }
  reserve(n);
  DCHECK_GE(capacity(), n);

  // Fill new space with elements constructed in-place.
  if (allocated()) {
    UninitializedFillAllocated(allocated_space() + s, allocated_space() + n);
  } else {
    UninitializedFillInlined(inlined_space() + s, inlined_space() + n);
  }
  set_size_internal(n);
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::resize(size_t n, const value_type& elem) {
  size_t s = size();
  if (n < s) {
    erase(begin() + n, end());
    return;
  }
  reserve(n);
  DCHECK_GE(capacity(), n);

  // Fill new space with copies of 'elem'.
  if (allocated()) {
    UninitializedFillAllocated(allocated_space() + s, allocated_space() + n,
                               elem);
  } else {
    UninitializedFillInlined(inlined_space() + s, inlined_space() + n, elem);
  }
  set_size_internal(n);
}

template <typename T, int N, typename A>
typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::insert(
    iterator pos, const value_type& v) {
  DCHECK_GE(pos, begin());
  DCHECK_LE(pos, end());
  if (pos == end()) {
    push_back(v);
    return end() - 1;
  }
  size_t s = size();
  size_t idx = std::distance(begin(), pos);
  if (s == capacity()) {
    EnlargeBy(1);
  }
  CHECK_LT(s, capacity());
  pos = begin() + idx;  // Reset 'pos' into a post-enlarge iterator.

  if (allocated()) {
    ConstructAllocated(allocated_space() + s, *(allocated_space() + s - 1));
    std::copy_backward(pos, allocated_space() + s - 1, allocated_space() + s);
  } else {
    ConstructInlined(inlined_space() + s, *(inlined_space() + s - 1));
    std::copy_backward(pos, inlined_space() + s - 1, inlined_space() + s);
  }

  *pos = v;

  set_size_internal(s + 1);
  return pos;
}

template <typename T, int N, typename A>
typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::erase(
    iterator first, iterator last) {
  DCHECK_LE(begin(), first);
  DCHECK_LE(first, last);
  DCHECK_LE(last, end());

  size_t s = size();
  ptrdiff_t erase_gap = std::distance(first, last);

  if (allocated()) {
    std::copy(last, allocated_space() + s, first);
    DestroyAllocated(allocated_space() + s - erase_gap, allocated_space() + s);
  } else {
    std::copy(last, inlined_space() + s, first);
    DestroyInlined(inlined_space() + s - erase_gap, inlined_space() + s);
  }

  set_size_internal(size() - erase_gap);

  return first;
}

template <typename T, int N, typename A>
void InlinedVector<T, N, A>::swap(InlinedVector& other) {
  using std::swap;  // Augment ADL with std::swap.
  if (&other == this) {
    return;
  }
  if (allocated() && other.allocated()) {
    // Both out of line, so just swap the tag, allocation, and allocator.
    swap(tag(), other.tag());
    swap(allocation(), other.allocation());
    swap(allocator(), other.allocator());
    return;
  }
  if (!allocated() && !other.allocated()) {
    // Both inlined: swap up to smaller size, then move remaining elements.
    InlinedVector* a = this;
    InlinedVector* b = &other;
    if (size() < other.size()) {
      swap(a, b);
    }

    const size_t a_size = a->size();
    const size_t b_size = b->size();
    DCHECK_GE(a_size, b_size);
    // 'a' is larger. Swap the elements up to the smaller array size.
    std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size,
                     b->inlined_space());

    // Move the remaining elements: A[b_size,a_size) -> B[b_size,a_size)
    b->UninitializedCopyInlined(a->inlined_space() + b_size,
                                a->inlined_space() + a_size,
                                b->inlined_space() + b_size);
    a->DestroyInlined(a->inlined_space() + b_size, a->inlined_space() + a_size);

    swap(a->tag(), b->tag());
    swap(a->allocator(), b->allocator());
    DCHECK_EQ(b->size(), a_size);
    DCHECK_EQ(a->size(), b_size);
    return;
  }
  // One is out of line, one is inline.
  // We first move the elements from the inlined vector into the
  // inlined space in the other vector.  We then put the other vector's
  // pointer/capacity into the originally inlined vector and swap
  // the tags.
  InlinedVector* a = this;
  InlinedVector* b = &other;
  if (a->allocated()) {
    swap(a, b);
  }
  DCHECK(!a->allocated());
  DCHECK(b->allocated());
  const size_t a_size = a->size();
  const size_t b_size = b->size();

  // Made local copies of size(); tag() no longer needs to be accurate.
  swap(a->tag(), b->tag());

  // Copy b_allocation out before b's union gets clobbered by inlined_space.
  Allocation b_allocation = b->allocation();

  b->UninitializedCopyInlined(a->inlined_space(), a->inlined_space() + a_size,
                              b->inlined_space());
  a->DestroyInlined(a->inlined_space(), a->inlined_space() + a_size);

  a->allocation() = b_allocation;

  if (a->allocator() != b->allocator()) {
    swap(a->allocator(), b->allocator());
  }

  DCHECK_EQ(b->size(), a_size);
  DCHECK_EQ(a->size(), b_size);
}

template <typename T, int N, typename A>
void InlinedVector<T, N, A>::EnlargeBy(size_t delta) {
  const size_t s = size();
  DCHECK_LE(s, capacity());

  size_t target = std::max(static_cast<size_t>(N), s + delta);

  // Compute new capacity by repeatedly doubling current capacity
  // TODO(psrc): Check and avoid overflow?
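  // (Illustrative example, not from the original source: with capacity() == 4
  // and target == 9, the loop below grows new_capacity 4 -> 8 -> 16.)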
  size_t new_capacity = capacity();
  while (new_capacity < target) {
    new_capacity <<= 1;
  }

  Allocation new_allocation(allocator(), new_capacity);
  new_allocation.set_size(s);

  UninitializedCopyAllocated(array(), array() + s, new_allocation.buffer());

  ResetAllocation(new_allocation);
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::DestroyInlined(value_type* ptr,
                                                   value_type* ptr_last) {
  for (value_type* p = ptr; p != ptr_last; ++p) {
    p->~value_type();
  }

// Overwrite unused memory with 0xab so we can catch uninitialized usage.
// Cast to void* to tell the compiler that we don't care that we might be
// scribbling on a vtable pointer.
#ifndef NDEBUG
  if (ptr != ptr_last) {
    memset(reinterpret_cast<void*>(ptr), 0xab, sizeof(*ptr) * (ptr_last - ptr));
  }
#endif
}

template <typename T, int N, typename A>
inline void InlinedVector<T, N, A>::DestroyAllocated(value_type* ptr,
                                                     value_type* ptr_last) {
  for (value_type* p = ptr; p != ptr_last; ++p) {
    AllocatorTraits::destroy(allocator(), p);
  }

// Overwrite unused memory with 0xab so we can catch uninitialized usage.
// Cast to void* to tell the compiler that we don't care that we might be
// scribbling on a vtable pointer.
#ifndef NDEBUG
  if (ptr != ptr_last) {
    memset(reinterpret_cast<void*>(ptr), 0xab, sizeof(*ptr) * (ptr_last - ptr));
  }
#endif
}

template <typename T, int N, typename A>
template <typename Iter>
inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last,
                                                std::input_iterator_tag) {
  std::copy(first, last, std::back_inserter(*this));
}

template <typename T, int N, typename A>
template <typename Iter>
inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last,
                                                std::forward_iterator_tag) {
  typedef typename std::iterator_traits<Iter>::difference_type Length;
  Length length = std::distance(first, last);
  reserve(size() + length);
  if (allocated()) {
    UninitializedCopyAllocated(first, last, allocated_space() + size());
  } else {
    UninitializedCopyInlined(first, last, inlined_space() + size());
  }
  set_size_internal(size() + length);
}

template <typename T, int N, typename A>
template <typename Iter>
inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last) {
  typedef typename std::iterator_traits<Iter>::iterator_category IterTag;
  AppendRange(first, last, IterTag());
}

}  // namespace gtl
}  // namespace tensorflow

#endif  // TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_