DenseMap.h revision 2430973fb657eb84dfbacb1e8886d3a29190e0b5
//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include "llvm/ADT/DenseMapInfo.h"
#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>

namespace llvm {

template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT>,
         bool IsConst = false>
class DenseMapIterator;

template<typename DerivedT,
         typename KeyT, typename ValueT, typename KeyInfoT>
class DenseMapBase {
protected:
  typedef std::pair<KeyT, ValueT> BucketT;

public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), true);
  }
  inline const_iterator begin() const {
    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  }

  bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the DenseMap so that it has at least Size buckets. Does not shrink.
  void resize(size_t Size) {
    if (Size > getNumBuckets())
      grow(Size);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
          P->second.~ValueT();
          decrementNumEntries();
        }
        P->first = EmptyKey;
      }
    }
    assert(getNumEntries() == 0 && "Node count imbalance!");
    setNumTombstones(0);
  }

  /// count - Return true if the specified key is in the map.
  bool count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
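
  // Illustrative sketch: to use find_as(), KeyInfoT must provide overloads
  // for the lookup key type.  Assuming a hypothetical map keyed by
  // std::string and looked up by StringRef, its DenseMapInfo would
  // additionally need something like:
  //
  //   static unsigned getHashValue(StringRef Val);
  //   static bool isEqual(StringRef LHS, const std::string &RHS);
  //
  // so that a lookup never has to construct a full KeyT.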

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->second;
    return ValueT();
  }
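
  // Illustrative usage sketch; 'Widths' and 'K' are hypothetical names.
  // Unlike operator[], lookup() never inserts, so it is usable on a const map:
  //
  //   const llvm::DenseMap<unsigned, unsigned> &Widths = ...;
  //   unsigned W = Widths.lookup(K);  // 0 if K has no entry.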

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }
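
  // Illustrative usage sketch; 'M' is a hypothetical map.  Note that insert()
  // never overwrites an existing value:
  //
  //   llvm::DenseMap<unsigned, const char *> M;
  //   M.insert(std::make_pair(1u, "one"));      // inserted; .second == true
  //   std::pair<llvm::DenseMap<unsigned, const char *>::iterator, bool> R =
  //       M.insert(std::make_pair(1u, "uno"));  // key already present
  //   // R.second is false and R.first->second is still "one".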

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }
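
  // Illustrative usage sketch; 'Counts' is a hypothetical map.  operator[]
  // default-constructs the mapped value for a missing key before returning a
  // reference to it:
  //
  //   llvm::DenseMap<unsigned, unsigned> Counts;
  //   ++Counts[42];            // inserts {42, 0}, then bumps it to 1
  //   unsigned N = Counts[7];  // inserts {7, 0}; N == 0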

#if LLVM_USE_RVALUE_REFERENCES
  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(Key).second;
  }
#endif

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
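
  // Illustrative sketch of combining the two methods above; 'M' and 'KV' are
  // hypothetical names:
  //
  //   const void *OldArray = M.getPointerIntoBucketsArray();
  //   M.insert(KV);
  //   bool Reallocated = !M.isPointerIntoBucketsArray(OldArray);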

protected:
  DenseMapBase() {}

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
          !KeyInfoT::isEqual(P->first, TombstoneKey))
        P->second.~ValueT();
      P->first.~KeyT();
    }

#ifndef NDEBUG
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->first) KeyT(EmptyKey);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
          !KeyInfoT::isEqual(B->first, TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->first, DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->first = llvm_move(B->first);
        new (&DestBucket->second) ValueT(llvm_move(B->second));
        incrementNumEntries();

        // Free the value.
        B->second.~ValueT();
      }
      B->first.~KeyT();
    }

#ifndef NDEBUG
    if (OldBucketsBegin != OldBucketsEnd)
      memset((void*)OldBucketsBegin, 0x5a,
             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
#endif
  }

  template <typename OtherBaseT>
  void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
        if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
          new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
      }
  }

  void swap(DenseMapBase& RHS) {
    std::swap(getNumEntries(), RHS.getNumEntries());
    std::swap(getNumTombstones(), RHS.getNumTombstones());
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(Value);
    return TheBucket;
  }

#if LLVM_USE_RVALUE_REFERENCES
  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = std::move(Key);
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }
#endif

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
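    //
    // For example (illustrative numbers): with 64 buckets the first check
    // below fires while inserting the 48th entry (48*4 >= 64*3), and the
    // second fires once the number of empty buckets left (buckets minus
    // entries and tombstones) would drop to 64/8 = 8 or fewer.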
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (NewNumEntries*4 >= NumBuckets*3) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    }
    if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey()))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = 0;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = 0;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key isn't in the map.  Return this
      // bucket so the caller can decide whether to insert into it.
      if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        if (FoundTombstone) ThisBucket = FoundTombstone;
        FoundBucket = ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
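      // (Illustrative aside: because ProbeAmt grows by one each iteration,
      // successive probes land at offsets 1, 3, 6, 10, ... from the initial
      // hash position, modulo NumBuckets.)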
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class DenseMap
    : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
                          KeyT, ValueT, KeyInfoT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  explicit DenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  DenseMap(const DenseMap &other) {
    init(0);
    copyFrom(other);
  }

#if LLVM_USE_RVALUE_REFERENCES
  DenseMap(DenseMap &&other) {
    init(0);
    swap(other);
  }
#endif

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    copyFrom(other);
    return *this;
  }

#if LLVM_USE_RVALUE_REFERENCES
  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }
#endif

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    AtLeast = isPowerOf2_32(AtLeast) ? AtLeast : NextPowerOf2(AtLeast);
    allocateBuckets(std::max<unsigned>(64, AtLeast));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = 0;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

template<typename KeyT, typename ValueT,
         unsigned InlineBuckets = 4,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class SmallDenseMap
    : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
                          KeyT, ValueT, KeyInfoT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) {
    init(0);
    copyFrom(other);
  }

#if LLVM_USE_RVALUE_REFERENCES
  SmallDenseMap(SmallDenseMap &&other) {
    init(0);
    swap(other);
  }
#endif

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->first, RHSB->first);
        if (hasLHSValue) {
          new (&RHSB->second) ValueT(llvm_move(LHSB->second));
          LHSB->second.~ValueT();
        } else if (hasRHSValue) {
          new (&LHSB->second) ValueT(llvm_move(RHSB->second));
          RHSB->second.~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      new (&NewB->first) KeyT(llvm_move(OldB->first));
      OldB->first.~KeyT();
      if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
          !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
        new (&NewB->second) ValueT(llvm_move(OldB->second));
        OldB->second.~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    copyFrom(other);
    return *this;
  }

#if LLVM_USE_RVALUE_REFERENCES
  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }
#endif

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      allocateBuckets(other.getNumBuckets());
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets) {
      AtLeast = isPowerOf2_32(AtLeast) ? AtLeast : NextPowerOf2(AtLeast);
      AtLeast = std::max<unsigned>(64, AtLeast);
    }

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
            !KeyInfoT::isEqual(P->first, TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          new (&TmpEnd->first) KeyT(llvm_move(P->first));
          new (&TmpEnd->second) ValueT(llvm_move(P->second));
          ++TmpEnd;
          P->second.~ValueT();
        }
        P->first.~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = llvm_move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};
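
// Illustrative usage sketch; the inline bucket count 8 and the name 'DepthOf'
// are hypothetical.  SmallDenseMap keeps its first InlineBuckets buckets
// inside the object itself and only heap-allocates once it has to grow past
// them:
//
//   llvm::SmallDenseMap<unsigned, unsigned, 8> DepthOf;
//   DepthOf[3] = 1;  // stored in the inline buckets; no heap allocation yet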

template<typename KeyT, typename ValueT,
         typename KeyInfoT, bool IsConst>
class DenseMapIterator {
  typedef std::pair<KeyT, ValueT> Bucket;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
  typedef ptrdiff_t difference_type;
  typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(0), End(0) {}

  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
    : Ptr(Pos), End(E) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // If IsConst is true this is a converting constructor from iterator to
  // const_iterator and the default copy constructor is used.
  // Otherwise this is a copy constructor for iterator.
  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
                                          KeyInfoT, false>& I)
    : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.operator->();
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.operator->();
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End &&
           (KeyInfoT::isEqual(Ptr->first, Empty) ||
            KeyInfoT::isEqual(Ptr->first, Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}
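
// Illustrative usage sketch; 'M' is a hypothetical map:
//
//   llvm::DenseMap<unsigned, unsigned> M;
//   size_t Bytes = llvm::capacity_in_bytes(M);  // same as M.getMemorySize()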

} // end namespace llvm

#endif