DenseMap.h revision cbe40cfe96a6bb3f2da56445269c2c71e55e0e56
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the DenseMap class.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/Support/AlignOf.h"
19#include "llvm/Support/Compiler.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/PointerLikeTypeTraits.h"
22#include "llvm/Support/type_traits.h"
23#include <algorithm>
24#include <cassert>
25#include <climits>
26#include <cstddef>
27#include <cstring>
28#include <iterator>
29#include <new>
30#include <utility>
31
32namespace llvm {
33
/// Forward declaration of the iterator type; the definition appears at the
/// bottom of this file.  The IsConst flag selects the const_iterator flavor.
template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT>,
         bool IsConst = false>
class DenseMapIterator;
38
/// DenseMapBase - Common implementation of the quadratically probed hash
/// table underlying DenseMap and SmallDenseMap.  This is a CRTP base class:
/// DerivedT supplies the bucket storage and the entry/tombstone counters via
/// the private forwarding accessors below, while this class implements
/// lookup, insertion, erasure, clearing and rehashing on top of them.
///
/// Buckets are key/value pairs.  A bucket is "empty" when its key equals
/// KeyInfoT::getEmptyKey(), and a "tombstone" (erased slot) when it equals
/// KeyInfoT::getTombstoneKey(); only buckets with any other key have a
/// constructed ValueT in their second member.
template<typename DerivedT,
         typename KeyT, typename ValueT, typename KeyInfoT>
class DenseMapBase {
protected:
  typedef std::pair<KeyT, ValueT> BucketT;

public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), true);
  }
  inline const_iterator begin() const {
    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  }

  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it has at least Size buckets. Does not shrink
  void resize(size_t Size) {
    if (Size > getNumBuckets())
      grow(Size);
  }

  /// clear - Remove all entries, resetting every non-empty bucket to the
  /// empty key.  If the table is large and mostly unused, release memory by
  /// delegating to shrink_and_clear() instead.
  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
          // Occupied bucket: destroy the value before recycling the slot.
          P->second.~ValueT();
          decrementNumEntries();
        }
        // Both occupied and tombstone slots revert to the empty key.
        P->first = EmptyKey;
      }
    }
    assert(getNumEntries() == 0 && "Node count imbalance!");
    setNumTombstones(0);
  }

  /// count - Return true if the specified key is in the map.
  bool count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  /// find - Return an iterator positioned at the entry for Val, or end() if
  /// the key is not present.
  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->second;
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }

#if LLVM_HAS_RVALUE_REFERENCES
  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first),
                                 std::move(KV.second),
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }
#endif

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  /// erase - Remove the entry for Val if present, leaving a tombstone in its
  /// bucket so probe chains through it keep working.  Returns true if an
  /// entry was actually removed.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  /// erase - Remove the entry the iterator points at (must be valid).
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  /// FindAndConstruct - Return the bucket for Key, default-constructing and
  /// inserting a value first if the key was not present.
  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

#if LLVM_HAS_RVALUE_REFERENCES
  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }
#endif

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() {}

  /// destroyAll - Run destructors on every constructed key, and on the value
  /// of every occupied (non-empty, non-tombstone) bucket.  Does not free the
  /// bucket storage; that is the derived class's job.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
          !KeyInfoT::isEqual(P->first, TombstoneKey))
        P->second.~ValueT();
      P->first.~KeyT();
    }

#ifndef NDEBUG
    // Poison the freed-but-not-released storage to catch use-after-destroy.
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  /// initEmpty - Zero the counters and construct the empty key into every
  /// bucket.  Values are left unconstructed (see the class comment).
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->first) KeyT(EmptyKey);
  }

  /// moveFromOldBuckets - Re-initialize this (freshly allocated) table and
  /// rehash every occupied bucket from the old array into it, destroying the
  /// old keys and values as it goes.  Tombstones are dropped, not copied.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
          !KeyInfoT::isEqual(B->first, TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->first, DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->first = llvm_move(B->first);
        new (&DestBucket->second) ValueT(llvm_move(B->second));
        incrementNumEntries();

        // Free the value.
        B->second.~ValueT();
      }
      B->first.~KeyT();
    }

#ifndef NDEBUG
    if (OldBucketsBegin != OldBucketsEnd)
      memset((void*)OldBucketsBegin, 0x5a,
             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
#endif
  }

  /// copyFrom - Copy every bucket of 'other' into this table, which the
  /// caller must already have sized to the same number of buckets.  POD
  /// key/value types take a single memcpy fast path.
  template <typename OtherBaseT>
  void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
        // Only occupied buckets have a constructed value to copy.
        if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
          new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
      }
  }

  // NOTE(review): this passes the by-value results of getNumEntries() /
  // getNumTombstones() to std::swap, which requires lvalues.  Both derived
  // classes define their own swap(), so this template is never instantiated;
  // it would not compile if it were.  Verify before relying on it.
  void swap(DenseMapBase& RHS) {
    std::swap(getNumEntries(), RHS.getNumEntries());
    std::swap(getNumTombstones(), RHS.getNumTombstones());
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  // CRTP forwarding accessors: the derived class owns the actual counters
  // and bucket storage; everything above goes through these.
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  /// InsertIntoBucket - Place Key/Value into the (possibly relocated) bucket
  /// returned by InsertIntoBucketImpl.  The key is assigned over the empty or
  /// tombstone key already constructed there; the value is placement-newed.
  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(Value);
    return TheBucket;
  }

#if LLVM_HAS_RVALUE_REFERENCES
  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = std::move(Key);
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }
#endif

  /// InsertIntoBucketImpl - Grow the table if needed, update the entry and
  /// tombstone counters, and return the bucket the new entry should occupy
  /// (re-running the lookup after any grow relocates the buckets).
  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The later case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (NewNumEntries*4 >= NumBuckets*3) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    }
    if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->first, EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = 0;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = 0;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two (asserted in initEmpty), so masking with
    // NumBuckets-1 is a cheap modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  /// Non-const overload implemented in terms of the const version above.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};
532
533template<typename KeyT, typename ValueT,
534         typename KeyInfoT = DenseMapInfo<KeyT> >
535class DenseMap
536    : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
537                          KeyT, ValueT, KeyInfoT> {
538  // Lift some types from the dependent base class into this class for
539  // simplicity of referring to them.
540  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
541  typedef typename BaseT::BucketT BucketT;
542  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;
543
544  BucketT *Buckets;
545  unsigned NumEntries;
546  unsigned NumTombstones;
547  unsigned NumBuckets;
548
549public:
550  explicit DenseMap(unsigned NumInitBuckets = 0) {
551    init(NumInitBuckets);
552  }
553
554  DenseMap(const DenseMap &other) : BaseT() {
555    init(0);
556    copyFrom(other);
557  }
558
559#if LLVM_HAS_RVALUE_REFERENCES
560  DenseMap(DenseMap &&other) : BaseT() {
561    init(0);
562    swap(other);
563  }
564#endif
565
566  template<typename InputIt>
567  DenseMap(const InputIt &I, const InputIt &E) {
568    init(NextPowerOf2(std::distance(I, E)));
569    this->insert(I, E);
570  }
571
572  ~DenseMap() {
573    this->destroyAll();
574    operator delete(Buckets);
575  }
576
577  void swap(DenseMap& RHS) {
578    std::swap(Buckets, RHS.Buckets);
579    std::swap(NumEntries, RHS.NumEntries);
580    std::swap(NumTombstones, RHS.NumTombstones);
581    std::swap(NumBuckets, RHS.NumBuckets);
582  }
583
584  DenseMap& operator=(const DenseMap& other) {
585    copyFrom(other);
586    return *this;
587  }
588
589#if LLVM_HAS_RVALUE_REFERENCES
590  DenseMap& operator=(DenseMap &&other) {
591    this->destroyAll();
592    operator delete(Buckets);
593    init(0);
594    swap(other);
595    return *this;
596  }
597#endif
598
599  void copyFrom(const DenseMap& other) {
600    this->destroyAll();
601    operator delete(Buckets);
602    if (allocateBuckets(other.NumBuckets)) {
603      this->BaseT::copyFrom(other);
604    } else {
605      NumEntries = 0;
606      NumTombstones = 0;
607    }
608  }
609
610  void init(unsigned InitBuckets) {
611    if (allocateBuckets(InitBuckets)) {
612      this->BaseT::initEmpty();
613    } else {
614      NumEntries = 0;
615      NumTombstones = 0;
616    }
617  }
618
619  void grow(unsigned AtLeast) {
620    unsigned OldNumBuckets = NumBuckets;
621    BucketT *OldBuckets = Buckets;
622
623    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
624    assert(Buckets);
625    if (!OldBuckets) {
626      this->BaseT::initEmpty();
627      return;
628    }
629
630    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
631
632    // Free the old table.
633    operator delete(OldBuckets);
634  }
635
636  void shrink_and_clear() {
637    unsigned OldNumEntries = NumEntries;
638    this->destroyAll();
639
640    // Reduce the number of buckets.
641    unsigned NewNumBuckets = 0;
642    if (OldNumEntries)
643      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
644    if (NewNumBuckets == NumBuckets) {
645      this->BaseT::initEmpty();
646      return;
647    }
648
649    operator delete(Buckets);
650    init(NewNumBuckets);
651  }
652
653private:
654  unsigned getNumEntries() const {
655    return NumEntries;
656  }
657  void setNumEntries(unsigned Num) {
658    NumEntries = Num;
659  }
660
661  unsigned getNumTombstones() const {
662    return NumTombstones;
663  }
664  void setNumTombstones(unsigned Num) {
665    NumTombstones = Num;
666  }
667
668  BucketT *getBuckets() const {
669    return Buckets;
670  }
671
672  unsigned getNumBuckets() const {
673    return NumBuckets;
674  }
675
676  bool allocateBuckets(unsigned Num) {
677    NumBuckets = Num;
678    if (NumBuckets == 0) {
679      Buckets = 0;
680      return false;
681    }
682
683    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
684    return true;
685  }
686};
687
688template<typename KeyT, typename ValueT,
689         unsigned InlineBuckets = 4,
690         typename KeyInfoT = DenseMapInfo<KeyT> >
691class SmallDenseMap
692    : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
693                          KeyT, ValueT, KeyInfoT> {
694  // Lift some types from the dependent base class into this class for
695  // simplicity of referring to them.
696  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
697  typedef typename BaseT::BucketT BucketT;
698  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;
699
700  unsigned Small : 1;
701  unsigned NumEntries : 31;
702  unsigned NumTombstones;
703
704  struct LargeRep {
705    BucketT *Buckets;
706    unsigned NumBuckets;
707  };
708
709  /// A "union" of an inline bucket array and the struct representing
710  /// a large bucket. This union will be discriminated by the 'Small' bit.
711  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
712
713public:
714  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
715    init(NumInitBuckets);
716  }
717
718  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
719    init(0);
720    copyFrom(other);
721  }
722
723#if LLVM_HAS_RVALUE_REFERENCES
724  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
725    init(0);
726    swap(other);
727  }
728#endif
729
730  template<typename InputIt>
731  SmallDenseMap(const InputIt &I, const InputIt &E) {
732    init(NextPowerOf2(std::distance(I, E)));
733    this->insert(I, E);
734  }
735
736  ~SmallDenseMap() {
737    this->destroyAll();
738    deallocateBuckets();
739  }
740
741  void swap(SmallDenseMap& RHS) {
742    unsigned TmpNumEntries = RHS.NumEntries;
743    RHS.NumEntries = NumEntries;
744    NumEntries = TmpNumEntries;
745    std::swap(NumTombstones, RHS.NumTombstones);
746
747    const KeyT EmptyKey = this->getEmptyKey();
748    const KeyT TombstoneKey = this->getTombstoneKey();
749    if (Small && RHS.Small) {
750      // If we're swapping inline bucket arrays, we have to cope with some of
751      // the tricky bits of DenseMap's storage system: the buckets are not
752      // fully initialized. Thus we swap every key, but we may have
753      // a one-directional move of the value.
754      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
755        BucketT *LHSB = &getInlineBuckets()[i],
756                *RHSB = &RHS.getInlineBuckets()[i];
757        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
758                            !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
759        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
760                            !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
761        if (hasLHSValue && hasRHSValue) {
762          // Swap together if we can...
763          std::swap(*LHSB, *RHSB);
764          continue;
765        }
766        // Swap separately and handle any assymetry.
767        std::swap(LHSB->first, RHSB->first);
768        if (hasLHSValue) {
769          new (&RHSB->second) ValueT(llvm_move(LHSB->second));
770          LHSB->second.~ValueT();
771        } else if (hasRHSValue) {
772          new (&LHSB->second) ValueT(llvm_move(RHSB->second));
773          RHSB->second.~ValueT();
774        }
775      }
776      return;
777    }
778    if (!Small && !RHS.Small) {
779      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
780      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
781      return;
782    }
783
784    SmallDenseMap &SmallSide = Small ? *this : RHS;
785    SmallDenseMap &LargeSide = Small ? RHS : *this;
786
787    // First stash the large side's rep and move the small side across.
788    LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
789    LargeSide.getLargeRep()->~LargeRep();
790    LargeSide.Small = true;
791    // This is similar to the standard move-from-old-buckets, but the bucket
792    // count hasn't actually rotated in this case. So we have to carefully
793    // move construct the keys and values into their new locations, but there
794    // is no need to re-hash things.
795    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
796      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
797              *OldB = &SmallSide.getInlineBuckets()[i];
798      new (&NewB->first) KeyT(llvm_move(OldB->first));
799      OldB->first.~KeyT();
800      if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
801          !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
802        new (&NewB->second) ValueT(llvm_move(OldB->second));
803        OldB->second.~ValueT();
804      }
805    }
806
807    // The hard part of moving the small buckets across is done, just move
808    // the TmpRep into its new home.
809    SmallSide.Small = false;
810    new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
811  }
812
813  SmallDenseMap& operator=(const SmallDenseMap& other) {
814    copyFrom(other);
815    return *this;
816  }
817
818#if LLVM_HAS_RVALUE_REFERENCES
819  SmallDenseMap& operator=(SmallDenseMap &&other) {
820    this->destroyAll();
821    deallocateBuckets();
822    init(0);
823    swap(other);
824    return *this;
825  }
826#endif
827
828  void copyFrom(const SmallDenseMap& other) {
829    this->destroyAll();
830    deallocateBuckets();
831    Small = true;
832    if (other.getNumBuckets() > InlineBuckets) {
833      Small = false;
834      allocateBuckets(other.getNumBuckets());
835    }
836    this->BaseT::copyFrom(other);
837  }
838
839  void init(unsigned InitBuckets) {
840    Small = true;
841    if (InitBuckets > InlineBuckets) {
842      Small = false;
843      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
844    }
845    this->BaseT::initEmpty();
846  }
847
848  void grow(unsigned AtLeast) {
849    if (AtLeast >= InlineBuckets)
850      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
851
852    if (Small) {
853      if (AtLeast < InlineBuckets)
854        return; // Nothing to do.
855
856      // First move the inline buckets into a temporary storage.
857      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
858      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
859      BucketT *TmpEnd = TmpBegin;
860
861      // Loop over the buckets, moving non-empty, non-tombstones into the
862      // temporary storage. Have the loop move the TmpEnd forward as it goes.
863      const KeyT EmptyKey = this->getEmptyKey();
864      const KeyT TombstoneKey = this->getTombstoneKey();
865      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
866        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
867            !KeyInfoT::isEqual(P->first, TombstoneKey)) {
868          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
869                 "Too many inline buckets!");
870          new (&TmpEnd->first) KeyT(llvm_move(P->first));
871          new (&TmpEnd->second) ValueT(llvm_move(P->second));
872          ++TmpEnd;
873          P->second.~ValueT();
874        }
875        P->first.~KeyT();
876      }
877
878      // Now make this map use the large rep, and move all the entries back
879      // into it.
880      Small = false;
881      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
882      this->moveFromOldBuckets(TmpBegin, TmpEnd);
883      return;
884    }
885
886    LargeRep OldRep = llvm_move(*getLargeRep());
887    getLargeRep()->~LargeRep();
888    if (AtLeast <= InlineBuckets) {
889      Small = true;
890    } else {
891      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
892    }
893
894    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
895
896    // Free the old table.
897    operator delete(OldRep.Buckets);
898  }
899
900  void shrink_and_clear() {
901    unsigned OldSize = this->size();
902    this->destroyAll();
903
904    // Reduce the number of buckets.
905    unsigned NewNumBuckets = 0;
906    if (OldSize) {
907      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
908      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
909        NewNumBuckets = 64;
910    }
911    if ((Small && NewNumBuckets <= InlineBuckets) ||
912        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
913      this->BaseT::initEmpty();
914      return;
915    }
916
917    deallocateBuckets();
918    init(NewNumBuckets);
919  }
920
921private:
922  unsigned getNumEntries() const {
923    return NumEntries;
924  }
925  void setNumEntries(unsigned Num) {
926    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
927    NumEntries = Num;
928  }
929
930  unsigned getNumTombstones() const {
931    return NumTombstones;
932  }
933  void setNumTombstones(unsigned Num) {
934    NumTombstones = Num;
935  }
936
  /// Returns the bucket array stored in the inline buffer. Only valid in
  /// small mode (asserts `Small`).
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
944  BucketT *getInlineBuckets() {
945    return const_cast<BucketT *>(
946      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
947  }
  /// Returns the large-mode representation stored in the inline buffer. Only
  /// valid in large mode (asserts `!Small`).
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
953  LargeRep *getLargeRep() {
954    return const_cast<LargeRep *>(
955      const_cast<const SmallDenseMap *>(this)->getLargeRep());
956  }
957
958  const BucketT *getBuckets() const {
959    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
960  }
961  BucketT *getBuckets() {
962    return const_cast<BucketT *>(
963      const_cast<const SmallDenseMap *>(this)->getBuckets());
964  }
965  unsigned getNumBuckets() const {
966    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
967  }
968
969  void deallocateBuckets() {
970    if (Small)
971      return;
972
973    operator delete(getLargeRep()->Buckets);
974    getLargeRep()->~LargeRep();
975  }
976
977  LargeRep allocateBuckets(unsigned Num) {
978    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
979    LargeRep Rep = {
980      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
981    };
982    return Rep;
983  }
984};
985
986template<typename KeyT, typename ValueT,
987         typename KeyInfoT, bool IsConst>
988class DenseMapIterator {
989  typedef std::pair<KeyT, ValueT> Bucket;
990  typedef DenseMapIterator<KeyT, ValueT,
991                           KeyInfoT, true> ConstIterator;
992  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
993public:
994  typedef ptrdiff_t difference_type;
995  typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
996  typedef value_type *pointer;
997  typedef value_type &reference;
998  typedef std::forward_iterator_tag iterator_category;
999private:
1000  pointer Ptr, End;
1001public:
1002  DenseMapIterator() : Ptr(0), End(0) {}
1003
1004  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
1005    : Ptr(Pos), End(E) {
1006    if (!NoAdvance) AdvancePastEmptyBuckets();
1007  }
1008
1009  // If IsConst is true this is a converting constructor from iterator to
1010  // const_iterator and the default copy constructor is used.
1011  // Otherwise this is a copy constructor for iterator.
1012  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
1013                                          KeyInfoT, false>& I)
1014    : Ptr(I.Ptr), End(I.End) {}
1015
1016  reference operator*() const {
1017    return *Ptr;
1018  }
1019  pointer operator->() const {
1020    return Ptr;
1021  }
1022
1023  bool operator==(const ConstIterator &RHS) const {
1024    return Ptr == RHS.operator->();
1025  }
1026  bool operator!=(const ConstIterator &RHS) const {
1027    return Ptr != RHS.operator->();
1028  }
1029
1030  inline DenseMapIterator& operator++() {  // Preincrement
1031    ++Ptr;
1032    AdvancePastEmptyBuckets();
1033    return *this;
1034  }
1035  DenseMapIterator operator++(int) {  // Postincrement
1036    DenseMapIterator tmp = *this; ++*this; return tmp;
1037  }
1038
1039private:
1040  void AdvancePastEmptyBuckets() {
1041    const KeyT Empty = KeyInfoT::getEmptyKey();
1042    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1043
1044    while (Ptr != End &&
1045           (KeyInfoT::isEqual(Ptr->first, Empty) ||
1046            KeyInfoT::isEqual(Ptr->first, Tombstone)))
1047      ++Ptr;
1048  }
1049};
1050
/// Returns the memory footprint of \p X in bytes, as reported by
/// DenseMap::getMemorySize().
template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}
1056
1057} // end namespace llvm
1058
1059#endif
1060