DenseMap.h revision 9e8eafa0f0ad36c3e4397e97b67b8245b04ba618
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the DenseMap class.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/Support/AlignOf.h"
19#include "llvm/Support/Compiler.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/PointerLikeTypeTraits.h"
22#include "llvm/Support/type_traits.h"
23#include <algorithm>
24#include <cassert>
25#include <climits>
26#include <cstddef>
27#include <cstring>
28#include <iterator>
29#include <new>
30#include <utility>
31
32namespace llvm {
33
34template<typename KeyT, typename ValueT,
35         typename KeyInfoT = DenseMapInfo<KeyT>,
36         bool IsConst = false>
37class DenseMapIterator;
38
39template<typename DerivedT,
40         typename KeyT, typename ValueT, typename KeyInfoT>
41class DenseMapBase {
42protected:
43  typedef std::pair<KeyT, ValueT> BucketT;
44
45public:
46  typedef KeyT key_type;
47  typedef ValueT mapped_type;
48  typedef BucketT value_type;
49
50  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
51  typedef DenseMapIterator<KeyT, ValueT,
52                           KeyInfoT, true> const_iterator;
53  inline iterator begin() {
54    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
55    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
56  }
57  inline iterator end() {
58    return iterator(getBucketsEnd(), getBucketsEnd(), true);
59  }
60  inline const_iterator begin() const {
61    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
62  }
63  inline const_iterator end() const {
64    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
65  }
66
67  bool empty() const { return getNumEntries() == 0; }
68  unsigned size() const { return getNumEntries(); }
69
70  /// Grow the DenseMap so that it has at least Size buckets. Does not shrink.
71  void resize(size_t Size) {
72    if (Size > getNumBuckets())
73      grow(Size);
74  }
75
76  void clear() {
77    if (getNumEntries() == 0 && getNumTombstones() == 0) return;
78
79    // If the capacity of the array is huge, and the # elements used is small,
80    // shrink the array.
81    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
82      shrink_and_clear();
83      return;
84    }
85
86    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
87    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
88      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
89        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
90          P->second.~ValueT();
91          decrementNumEntries();
92        }
93        P->first = EmptyKey;
94      }
95    }
96    assert(getNumEntries() == 0 && "Node count imbalance!");
97    setNumTombstones(0);
98  }
99
100  /// count - Return true if the specified key is in the map.
101  bool count(const KeyT &Val) const {
102    const BucketT *TheBucket;
103    return LookupBucketFor(Val, TheBucket);
104  }
105
106  iterator find(const KeyT &Val) {
107    BucketT *TheBucket;
108    if (LookupBucketFor(Val, TheBucket))
109      return iterator(TheBucket, getBucketsEnd(), true);
110    return end();
111  }
112  const_iterator find(const KeyT &Val) const {
113    const BucketT *TheBucket;
114    if (LookupBucketFor(Val, TheBucket))
115      return const_iterator(TheBucket, getBucketsEnd(), true);
116    return end();
117  }
118
119  /// Alternate version of find() which allows a different, and possibly
120  /// less expensive, key type.
121  /// The DenseMapInfo is responsible for supplying methods
122  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
123  /// type used.
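  ///
  /// Illustrative sketch with a hypothetical StringKeyInfo that lets a map
  /// keyed by std::string be probed with a StringRef, avoiding a temporary
  /// std::string on every lookup:
  ///
  ///   struct StringKeyInfo {
  ///     static std::string getEmptyKey();
  ///     static std::string getTombstoneKey();
  ///     static unsigned getHashValue(const std::string &K);
  ///     static unsigned getHashValue(StringRef K);   // must hash identically
  ///     static bool isEqual(const std::string &A, const std::string &B);
  ///     static bool isEqual(StringRef A, const std::string &B);
  ///   };
  ///   DenseMap<std::string, unsigned, StringKeyInfo> M;
  ///   M.find_as(StringRef("name"));                  // no temporary key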
124  template<class LookupKeyT>
125  iterator find_as(const LookupKeyT &Val) {
126    BucketT *TheBucket;
127    if (LookupBucketFor(Val, TheBucket))
128      return iterator(TheBucket, getBucketsEnd(), true);
129    return end();
130  }
131  template<class LookupKeyT>
132  const_iterator find_as(const LookupKeyT &Val) const {
133    const BucketT *TheBucket;
134    if (LookupBucketFor(Val, TheBucket))
135      return const_iterator(TheBucket, getBucketsEnd(), true);
136    return end();
137  }
138
139  /// lookup - Return the entry for the specified key, or a default
140  /// constructed value if no such entry exists.
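  /// For example (illustrative), a DenseMap<unsigned, unsigned> returns 0u from
  /// lookup() for any key that was never inserted.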
141  ValueT lookup(const KeyT &Val) const {
142    const BucketT *TheBucket;
143    if (LookupBucketFor(Val, TheBucket))
144      return TheBucket->second;
145    return ValueT();
146  }
147
148  // Inserts a (key, value) pair into the map if the key isn't already present.
149  // If the key is already in the map, the returned pair's second member is
150  // false and the existing value is not updated.
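  //
  // For example (illustrative; M is a DenseMap<unsigned, int>, Key an unsigned):
  //   std::pair<DenseMap<unsigned, int>::iterator, bool> R =
  //       M.insert(std::make_pair(Key, 1));
  //   if (!R.second)
  //     R.first->second = 1;  // Key was already present; overwrite explicitly.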
151  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
152    BucketT *TheBucket;
153    if (LookupBucketFor(KV.first, TheBucket))
154      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
155                            false); // Already in map.
156
157    // Otherwise, insert the new element.
158    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
159    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
160  }
161
162#if LLVM_HAS_RVALUE_REFERENCES
163  // Inserts a (key, value) pair into the map if the key isn't already present.
164  // If the key is already in the map, the returned pair's second member is
165  // false and the existing value is not updated.
166  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
167    BucketT *TheBucket;
168    if (LookupBucketFor(KV.first, TheBucket))
169      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
170                            false); // Already in map.
171
172    // Otherwise, insert the new element.
173    TheBucket = InsertIntoBucket(std::move(KV.first),
174                                 std::move(KV.second),
175                                 TheBucket);
176    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
177  }
178#endif
179
180  /// insert - Range insertion of pairs.
181  template<typename InputIt>
182  void insert(InputIt I, InputIt E) {
183    for (; I != E; ++I)
184      insert(*I);
185  }
186
187
188  bool erase(const KeyT &Val) {
189    BucketT *TheBucket;
190    if (!LookupBucketFor(Val, TheBucket))
191      return false; // not in map.
192
193    TheBucket->second.~ValueT();
194    TheBucket->first = getTombstoneKey();
195    decrementNumEntries();
196    incrementNumTombstones();
197    return true;
198  }
199  void erase(iterator I) {
200    BucketT *TheBucket = &*I;
201    TheBucket->second.~ValueT();
202    TheBucket->first = getTombstoneKey();
203    decrementNumEntries();
204    incrementNumTombstones();
205  }
206
207  value_type& FindAndConstruct(const KeyT &Key) {
208    BucketT *TheBucket;
209    if (LookupBucketFor(Key, TheBucket))
210      return *TheBucket;
211
212    return *InsertIntoBucket(Key, ValueT(), TheBucket);
213  }
214
215  ValueT &operator[](const KeyT &Key) {
216    return FindAndConstruct(Key).second;
217  }
218
219#if LLVM_HAS_RVALUE_REFERENCES
220  value_type& FindAndConstruct(KeyT &&Key) {
221    BucketT *TheBucket;
222    if (LookupBucketFor(Key, TheBucket))
223      return *TheBucket;
224
225    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
226  }
227
228  ValueT &operator[](KeyT &&Key) {
229    return FindAndConstruct(std::move(Key)).second;
230  }
231#endif
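
  // Illustrative note: for a key that is not yet present, operator[] constructs
  // the mapped value from ValueT(), so counting idioms work directly:
  //   DenseMap<unsigned, unsigned> Counts;
  //   ++Counts[Key];  // a new unsigned Key starts the count at 0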
232
233  /// isPointerIntoBucketsArray - Return true if the specified pointer points
234  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
235  /// value in the DenseMap).
236  bool isPointerIntoBucketsArray(const void *Ptr) const {
237    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
238  }
239
240  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
241  /// array.  In conjunction with the previous method, this can be used to
242  /// determine whether an insertion caused the DenseMap to reallocate.
243  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
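
  // For example (illustrative; M is a DenseMap, Key/Value a valid entry):
  //   const void *P = M.getPointerIntoBucketsArray();
  //   M.insert(std::make_pair(Key, Value));
  //   bool Reallocated = !M.isPointerIntoBucketsArray(P);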
244
245protected:
246  DenseMapBase() {}
247
248  void destroyAll() {
249    if (getNumBuckets() == 0) // Nothing to do.
250      return;
251
252    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
253    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
254      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
255          !KeyInfoT::isEqual(P->first, TombstoneKey))
256        P->second.~ValueT();
257      P->first.~KeyT();
258    }
259
260#ifndef NDEBUG
261    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
262#endif
263  }
264
265  void initEmpty() {
266    setNumEntries(0);
267    setNumTombstones(0);
268
269    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
270           "# initial buckets must be a power of two!");
271    const KeyT EmptyKey = getEmptyKey();
272    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
273      new (&B->first) KeyT(EmptyKey);
274  }
275
276  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
277    initEmpty();
278
279    // Insert all the old elements.
280    const KeyT EmptyKey = getEmptyKey();
281    const KeyT TombstoneKey = getTombstoneKey();
282    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
283      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
284          !KeyInfoT::isEqual(B->first, TombstoneKey)) {
285        // Insert the key/value into the new table.
286        BucketT *DestBucket;
287        bool FoundVal = LookupBucketFor(B->first, DestBucket);
288        (void)FoundVal; // silence warning.
289        assert(!FoundVal && "Key already in new map?");
290        DestBucket->first = llvm_move(B->first);
291        new (&DestBucket->second) ValueT(llvm_move(B->second));
292        incrementNumEntries();
293
294        // Free the value.
295        B->second.~ValueT();
296      }
297      B->first.~KeyT();
298    }
299
300#ifndef NDEBUG
301    if (OldBucketsBegin != OldBucketsEnd)
302      memset((void*)OldBucketsBegin, 0x5a,
303             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
304#endif
305  }
306
307  template <typename OtherBaseT>
308  void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
309    assert(getNumBuckets() == other.getNumBuckets());
310
311    setNumEntries(other.getNumEntries());
312    setNumTombstones(other.getNumTombstones());
313
314    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
315      memcpy(getBuckets(), other.getBuckets(),
316             getNumBuckets() * sizeof(BucketT));
317    else
318      for (size_t i = 0; i < getNumBuckets(); ++i) {
319        new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
320        if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
321            !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
322          new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
323      }
324  }
325
326  void swap(DenseMapBase& RHS) {
327    std::swap(getNumEntries(), RHS.getNumEntries());
328    std::swap(getNumTombstones(), RHS.getNumTombstones());
329  }
330
331  static unsigned getHashValue(const KeyT &Val) {
332    return KeyInfoT::getHashValue(Val);
333  }
334  template<typename LookupKeyT>
335  static unsigned getHashValue(const LookupKeyT &Val) {
336    return KeyInfoT::getHashValue(Val);
337  }
338  static const KeyT getEmptyKey() {
339    return KeyInfoT::getEmptyKey();
340  }
341  static const KeyT getTombstoneKey() {
342    return KeyInfoT::getTombstoneKey();
343  }
344
345private:
346  unsigned getNumEntries() const {
347    return static_cast<const DerivedT *>(this)->getNumEntries();
348  }
349  void setNumEntries(unsigned Num) {
350    static_cast<DerivedT *>(this)->setNumEntries(Num);
351  }
352  void incrementNumEntries() {
353    setNumEntries(getNumEntries() + 1);
354  }
355  void decrementNumEntries() {
356    setNumEntries(getNumEntries() - 1);
357  }
358  unsigned getNumTombstones() const {
359    return static_cast<const DerivedT *>(this)->getNumTombstones();
360  }
361  void setNumTombstones(unsigned Num) {
362    static_cast<DerivedT *>(this)->setNumTombstones(Num);
363  }
364  void incrementNumTombstones() {
365    setNumTombstones(getNumTombstones() + 1);
366  }
367  void decrementNumTombstones() {
368    setNumTombstones(getNumTombstones() - 1);
369  }
370  const BucketT *getBuckets() const {
371    return static_cast<const DerivedT *>(this)->getBuckets();
372  }
373  BucketT *getBuckets() {
374    return static_cast<DerivedT *>(this)->getBuckets();
375  }
376  unsigned getNumBuckets() const {
377    return static_cast<const DerivedT *>(this)->getNumBuckets();
378  }
379  BucketT *getBucketsEnd() {
380    return getBuckets() + getNumBuckets();
381  }
382  const BucketT *getBucketsEnd() const {
383    return getBuckets() + getNumBuckets();
384  }
385
386  void grow(unsigned AtLeast) {
387    static_cast<DerivedT *>(this)->grow(AtLeast);
388  }
389
390  void shrink_and_clear() {
391    static_cast<DerivedT *>(this)->shrink_and_clear();
392  }
393
394
395  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
396                            BucketT *TheBucket) {
397    TheBucket = InsertIntoBucketImpl(Key, TheBucket);
398
399    TheBucket->first = Key;
400    new (&TheBucket->second) ValueT(Value);
401    return TheBucket;
402  }
403
404#if LLVM_HAS_RVALUE_REFERENCES
405  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
406                            BucketT *TheBucket) {
407    TheBucket = InsertIntoBucketImpl(Key, TheBucket);
408
409    TheBucket->first = Key;
410    new (&TheBucket->second) ValueT(std::move(Value));
411    return TheBucket;
412  }
413
414  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
415    TheBucket = InsertIntoBucketImpl(Key, TheBucket);
416
417    TheBucket->first = std::move(Key);
418    new (&TheBucket->second) ValueT(std::move(Value));
419    return TheBucket;
420  }
421#endif
422
423  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
424    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
425    // the buckets are empty (meaning that many are filled with tombstones),
426    // grow the table.
427    //
428    // The latter case is tricky.  For example, if we had one empty bucket with
429    // tons of tombstones, failing lookups (e.g. for insertion) would have to
430    // probe almost the entire table until they found the empty bucket.  If the
431    // table were completely filled with tombstones, no lookup would ever
432    // succeed, causing infinite loops in lookup.
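    // For example, with 64 buckets the first check below fires when the 48th
    // entry is about to go in (48*4 >= 64*3); the second fires when, counting
    // tombstones, 8 or fewer buckets would be left empty.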
433    unsigned NewNumEntries = getNumEntries() + 1;
434    unsigned NumBuckets = getNumBuckets();
435    if (NewNumEntries*4 >= NumBuckets*3) {
436      this->grow(NumBuckets * 2);
437      LookupBucketFor(Key, TheBucket);
438      NumBuckets = getNumBuckets();
439    }
440    if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
441      this->grow(NumBuckets * 2);
442      LookupBucketFor(Key, TheBucket);
443    }
444    assert(TheBucket);
445
446    // Only update the state after we've grown our bucket space appropriately
447    // so that when growing buckets we have a self-consistent entry count.
448    incrementNumEntries();
449
450    // If we are writing over a tombstone, drop it from the tombstone count.
451    const KeyT EmptyKey = getEmptyKey();
452    if (!KeyInfoT::isEqual(TheBucket->first, EmptyKey))
453      decrementNumTombstones();
454
455    return TheBucket;
456  }
457
458  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
459  /// FoundBucket.  If the bucket contains the key and a value, this returns
460  /// true; otherwise it sets FoundBucket to a bucket holding an empty marker
461  /// or a tombstone and returns false.
462  template<typename LookupKeyT>
463  bool LookupBucketFor(const LookupKeyT &Val,
464                       const BucketT *&FoundBucket) const {
465    const BucketT *BucketsPtr = getBuckets();
466    const unsigned NumBuckets = getNumBuckets();
467
468    if (NumBuckets == 0) {
469      FoundBucket = 0;
470      return false;
471    }
472
473    // FoundTombstone - Keep track of whether we find a tombstone while probing.
474    const BucketT *FoundTombstone = 0;
475    const KeyT EmptyKey = getEmptyKey();
476    const KeyT TombstoneKey = getTombstoneKey();
477    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
478           !KeyInfoT::isEqual(Val, TombstoneKey) &&
479           "Empty/Tombstone value shouldn't be inserted into map!");
480
481    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
482    unsigned ProbeAmt = 1;
483    while (1) {
484      const BucketT *ThisBucket = BucketsPtr + BucketNo;
485      // Found Val's bucket?  If so, return it.
486      if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
487        FoundBucket = ThisBucket;
488        return true;
489      }
490
491      // If we found an empty bucket, the key doesn't exist in the map.
492      // Hand back the bucket the caller may insert into.
493      if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
494        // If we've already seen a tombstone while probing, fill it in instead
495        // of the empty bucket we eventually probed to.
496        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
497        return false;
498      }
499
500      // If this is a tombstone, remember it.  If Val ends up not in the map, we
501      // prefer to return it rather than a bucket that would require more probing.
502      if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
503        FoundTombstone = ThisBucket;  // Remember the first tombstone found.
504
505      // Otherwise, it's a hash collision or a tombstone, continue quadratic
506      // probing.
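      // (The i-th probe lands at Hash + i*(i+1)/2 mod NumBuckets; for a
      // power-of-two table this triangular sequence eventually visits every
      // bucket, and the grow policy in InsertIntoBucketImpl keeps at least one
      // bucket empty, so the scan terminates.)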
507      BucketNo += ProbeAmt++;
508      BucketNo &= (NumBuckets-1);
509    }
510  }
511
512  template <typename LookupKeyT>
513  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
514    const BucketT *ConstFoundBucket;
515    bool Result = const_cast<const DenseMapBase *>(this)
516      ->LookupBucketFor(Val, ConstFoundBucket);
517    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
518    return Result;
519  }
520
521public:
522  /// Return the approximate size (in bytes) of the actual map.
523  /// This is just the raw memory used by DenseMap.
524  /// If entries are pointers to objects, the sizes of the referenced objects
525  /// are not included.
526  size_t getMemorySize() const {
527    return getNumBuckets() * sizeof(BucketT);
528  }
529};
530
531template<typename KeyT, typename ValueT,
532         typename KeyInfoT = DenseMapInfo<KeyT> >
533class DenseMap
534    : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
535                          KeyT, ValueT, KeyInfoT> {
536  // Lift some types from the dependent base class into this class for
537  // simplicity of referring to them.
538  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
539  typedef typename BaseT::BucketT BucketT;
540  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;
541
542  BucketT *Buckets;
543  unsigned NumEntries;
544  unsigned NumTombstones;
545  unsigned NumBuckets;
546
547public:
548  explicit DenseMap(unsigned NumInitBuckets = 0) {
549    init(NumInitBuckets);
550  }
551
552  DenseMap(const DenseMap &other) : BaseT() {
553    init(0);
554    copyFrom(other);
555  }
556
557#if LLVM_HAS_RVALUE_REFERENCES
558  DenseMap(DenseMap &&other) : BaseT() {
559    init(0);
560    swap(other);
561  }
562#endif
563
564  template<typename InputIt>
565  DenseMap(const InputIt &I, const InputIt &E) {
566    init(NextPowerOf2(std::distance(I, E)));
567    this->insert(I, E);
568  }
569
570  ~DenseMap() {
571    this->destroyAll();
572    operator delete(Buckets);
573  }
574
575  void swap(DenseMap& RHS) {
576    std::swap(Buckets, RHS.Buckets);
577    std::swap(NumEntries, RHS.NumEntries);
578    std::swap(NumTombstones, RHS.NumTombstones);
579    std::swap(NumBuckets, RHS.NumBuckets);
580  }
581
582  DenseMap& operator=(const DenseMap& other) {
583    copyFrom(other);
584    return *this;
585  }
586
587#if LLVM_HAS_RVALUE_REFERENCES
588  DenseMap& operator=(DenseMap &&other) {
589    this->destroyAll();
590    operator delete(Buckets);
591    init(0);
592    swap(other);
593    return *this;
594  }
595#endif
596
597  void copyFrom(const DenseMap& other) {
598    this->destroyAll();
599    operator delete(Buckets);
600    if (allocateBuckets(other.NumBuckets)) {
601      this->BaseT::copyFrom(other);
602    } else {
603      NumEntries = 0;
604      NumTombstones = 0;
605    }
606  }
607
608  void init(unsigned InitBuckets) {
609    assert(!KeyInfoT::isEqual(this->getEmptyKey(), this->getTombstoneKey()) &&
610           "Bad implementation of KeyInfoT: empty key and tombstone key "
611           "should be different");
612    if (allocateBuckets(InitBuckets)) {
613      this->BaseT::initEmpty();
614    } else {
615      NumEntries = 0;
616      NumTombstones = 0;
617    }
618  }
619
620  void grow(unsigned AtLeast) {
621    unsigned OldNumBuckets = NumBuckets;
622    BucketT *OldBuckets = Buckets;
623
624    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
625    assert(Buckets);
626    if (!OldBuckets) {
627      this->BaseT::initEmpty();
628      return;
629    }
630
631    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
632
633    // Free the old table.
634    operator delete(OldBuckets);
635  }
636
637  void shrink_and_clear() {
638    unsigned OldNumEntries = NumEntries;
639    this->destroyAll();
640
641    // Reduce the number of buckets.
642    unsigned NewNumBuckets = 0;
643    if (OldNumEntries)
644      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
645    if (NewNumBuckets == NumBuckets) {
646      this->BaseT::initEmpty();
647      return;
648    }
649
650    operator delete(Buckets);
651    init(NewNumBuckets);
652  }
653
654private:
655  unsigned getNumEntries() const {
656    return NumEntries;
657  }
658  void setNumEntries(unsigned Num) {
659    NumEntries = Num;
660  }
661
662  unsigned getNumTombstones() const {
663    return NumTombstones;
664  }
665  void setNumTombstones(unsigned Num) {
666    NumTombstones = Num;
667  }
668
669  BucketT *getBuckets() const {
670    return Buckets;
671  }
672
673  unsigned getNumBuckets() const {
674    return NumBuckets;
675  }
676
677  bool allocateBuckets(unsigned Num) {
678    NumBuckets = Num;
679    if (NumBuckets == 0) {
680      Buckets = 0;
681      return false;
682    }
683
684    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
685    return true;
686  }
687};
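
// Illustrative iteration sketch (M is a DenseMap<unsigned, unsigned>, Sum an
// unsigned): iterators walk the bucket array and skip empty and tombstone
// slots, so the visit order is unspecified, and any insertion that triggers a
// grow invalidates them.
//
//   for (DenseMap<unsigned, unsigned>::iterator I = M.begin(), E = M.end();
//        I != E; ++I)
//     Sum += I->second;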
688
689template<typename KeyT, typename ValueT,
690         unsigned InlineBuckets = 4,
691         typename KeyInfoT = DenseMapInfo<KeyT> >
692class SmallDenseMap
693    : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
694                          KeyT, ValueT, KeyInfoT> {
695  // Lift some types from the dependent base class into this class for
696  // simplicity of referring to them.
697  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
698  typedef typename BaseT::BucketT BucketT;
699  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;
700
701  unsigned Small : 1;
702  unsigned NumEntries : 31;
703  unsigned NumTombstones;
704
705  struct LargeRep {
706    BucketT *Buckets;
707    unsigned NumBuckets;
708  };
709
710  /// A "union" of an inline bucket array and the struct representing a large
711  /// bucket array. This union will be discriminated by the 'Small' bit.
712  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
713
714public:
715  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
716    init(NumInitBuckets);
717  }
718
719  SmallDenseMap(const SmallDenseMap &other) {
720    init(0);
721    copyFrom(other);
722  }
723
724#if LLVM_HAS_RVALUE_REFERENCES
725  SmallDenseMap(SmallDenseMap &&other) {
726    init(0);
727    swap(other);
728  }
729#endif
730
731  template<typename InputIt>
732  SmallDenseMap(const InputIt &I, const InputIt &E) {
733    init(NextPowerOf2(std::distance(I, E)));
734    this->insert(I, E);
735  }
736
737  ~SmallDenseMap() {
738    this->destroyAll();
739    deallocateBuckets();
740  }
741
742  void swap(SmallDenseMap& RHS) {
743    unsigned TmpNumEntries = RHS.NumEntries;
744    RHS.NumEntries = NumEntries;
745    NumEntries = TmpNumEntries;
746    std::swap(NumTombstones, RHS.NumTombstones);
747
748    const KeyT EmptyKey = this->getEmptyKey();
749    const KeyT TombstoneKey = this->getTombstoneKey();
750    if (Small && RHS.Small) {
751      // If we're swapping inline bucket arrays, we have to cope with some of
752      // the tricky bits of DenseMap's storage system: the buckets are not
753      // fully initialized. Thus we swap every key, but we may have
754      // a one-directional move of the value.
755      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
756        BucketT *LHSB = &getInlineBuckets()[i],
757                *RHSB = &RHS.getInlineBuckets()[i];
758        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
759                            !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
760        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
761                            !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
762        if (hasLHSValue && hasRHSValue) {
763          // Swap together if we can...
764          std::swap(*LHSB, *RHSB);
765          continue;
766        }
767        // Swap separately and handle any asymmetry.
768        std::swap(LHSB->first, RHSB->first);
769        if (hasLHSValue) {
770          new (&RHSB->second) ValueT(llvm_move(LHSB->second));
771          LHSB->second.~ValueT();
772        } else if (hasRHSValue) {
773          new (&LHSB->second) ValueT(llvm_move(RHSB->second));
774          RHSB->second.~ValueT();
775        }
776      }
777      return;
778    }
779    if (!Small && !RHS.Small) {
780      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
781      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
782      return;
783    }
784
785    SmallDenseMap &SmallSide = Small ? *this : RHS;
786    SmallDenseMap &LargeSide = Small ? RHS : *this;
787
788    // First stash the large side's rep and move the small side across.
789    LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
790    LargeSide.getLargeRep()->~LargeRep();
791    LargeSide.Small = true;
792    // This is similar to the standard move-from-old-buckets, but the bucket
793    // count hasn't actually changed in this case. So we have to carefully
794    // move construct the keys and values into their new locations, but there
795    // is no need to re-hash things.
796    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
797      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
798              *OldB = &SmallSide.getInlineBuckets()[i];
799      new (&NewB->first) KeyT(llvm_move(OldB->first));
800      OldB->first.~KeyT();
801      if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
802          !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
803        new (&NewB->second) ValueT(llvm_move(OldB->second));
804        OldB->second.~ValueT();
805      }
806    }
807
808    // The hard part of moving the small buckets across is done; just move
809    // the TmpRep into its new home.
810    SmallSide.Small = false;
811    new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
812  }
813
814  SmallDenseMap& operator=(const SmallDenseMap& other) {
815    copyFrom(other);
816    return *this;
817  }
818
819#if LLVM_HAS_RVALUE_REFERENCES
820  SmallDenseMap& operator=(SmallDenseMap &&other) {
821    this->destroyAll();
822    deallocateBuckets();
823    init(0);
824    swap(other);
825    return *this;
826  }
827#endif
828
829  void copyFrom(const SmallDenseMap& other) {
830    this->destroyAll();
831    deallocateBuckets();
832    Small = true;
833    if (other.getNumBuckets() > InlineBuckets) {
834      Small = false;
835      allocateBuckets(other.getNumBuckets());
836    }
837    this->BaseT::copyFrom(other);
838  }
839
840  void init(unsigned InitBuckets) {
841    Small = true;
842    if (InitBuckets > InlineBuckets) {
843      Small = false;
844      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
845    }
846    this->BaseT::initEmpty();
847  }
848
849  void grow(unsigned AtLeast) {
850    if (AtLeast >= InlineBuckets)
851      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
852
853    if (Small) {
854      if (AtLeast < InlineBuckets)
855        return; // Nothing to do.
856
857      // First move the inline buckets into a temporary storage.
858      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
859      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
860      BucketT *TmpEnd = TmpBegin;
861
862      // Loop over the buckets, moving non-empty, non-tombstone entries into the
863      // temporary storage, advancing TmpEnd as we go.
864      const KeyT EmptyKey = this->getEmptyKey();
865      const KeyT TombstoneKey = this->getTombstoneKey();
866      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
867        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
868            !KeyInfoT::isEqual(P->first, TombstoneKey)) {
869          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
870                 "Too many inline buckets!");
871          new (&TmpEnd->first) KeyT(llvm_move(P->first));
872          new (&TmpEnd->second) ValueT(llvm_move(P->second));
873          ++TmpEnd;
874          P->second.~ValueT();
875        }
876        P->first.~KeyT();
877      }
878
879      // Now make this map use the large rep, and move all the entries back
880      // into it.
881      Small = false;
882      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
883      this->moveFromOldBuckets(TmpBegin, TmpEnd);
884      return;
885    }
886
887    LargeRep OldRep = llvm_move(*getLargeRep());
888    getLargeRep()->~LargeRep();
889    if (AtLeast <= InlineBuckets) {
890      Small = true;
891    } else {
892      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
893    }
894
895    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
896
897    // Free the old table.
898    operator delete(OldRep.Buckets);
899  }
900
901  void shrink_and_clear() {
902    unsigned OldSize = this->size();
903    this->destroyAll();
904
905    // Reduce the number of buckets.
906    unsigned NewNumBuckets = 0;
907    if (OldSize) {
908      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
909      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
910        NewNumBuckets = 64;
911    }
912    if ((Small && NewNumBuckets <= InlineBuckets) ||
913        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
914      this->BaseT::initEmpty();
915      return;
916    }
917
918    deallocateBuckets();
919    init(NewNumBuckets);
920  }
921
922private:
923  unsigned getNumEntries() const {
924    return NumEntries;
925  }
926  void setNumEntries(unsigned Num) {
927    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
928    NumEntries = Num;
929  }
930
931  unsigned getNumTombstones() const {
932    return NumTombstones;
933  }
934  void setNumTombstones(unsigned Num) {
935    NumTombstones = Num;
936  }
937
938  const BucketT *getInlineBuckets() const {
939    assert(Small);
940    // Note that this cast does not violate aliasing rules as we assert that
941    // the memory's dynamic type is the small, inline bucket buffer, and the
942    // 'storage.buffer' static type is 'char *'.
943    return reinterpret_cast<const BucketT *>(storage.buffer);
944  }
945  BucketT *getInlineBuckets() {
946    return const_cast<BucketT *>(
947      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
948  }
949  const LargeRep *getLargeRep() const {
950    assert(!Small);
951    // Note, same rule about aliasing as with getInlineBuckets.
952    return reinterpret_cast<const LargeRep *>(storage.buffer);
953  }
954  LargeRep *getLargeRep() {
955    return const_cast<LargeRep *>(
956      const_cast<const SmallDenseMap *>(this)->getLargeRep());
957  }
958
959  const BucketT *getBuckets() const {
960    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
961  }
962  BucketT *getBuckets() {
963    return const_cast<BucketT *>(
964      const_cast<const SmallDenseMap *>(this)->getBuckets());
965  }
966  unsigned getNumBuckets() const {
967    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
968  }
969
970  void deallocateBuckets() {
971    if (Small)
972      return;
973
974    operator delete(getLargeRep()->Buckets);
975    getLargeRep()->~LargeRep();
976  }
977
978  LargeRep allocateBuckets(unsigned Num) {
979    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
980    LargeRep Rep = {
981      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
982    };
983    return Rep;
984  }
985};
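
// Illustrative note: SmallDenseMap keeps up to InlineBuckets buckets inside the
// object itself and only switches to a heap allocation once the load factor
// forces a grow; e.g. a SmallDenseMap<unsigned, unsigned, 8> stays inline for
// its first five insertions under the 3/4 load rule (tombstones from erased
// entries can force the switch sooner).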
986
987template<typename KeyT, typename ValueT,
988         typename KeyInfoT, bool IsConst>
989class DenseMapIterator {
990  typedef std::pair<KeyT, ValueT> Bucket;
991  typedef DenseMapIterator<KeyT, ValueT,
992                           KeyInfoT, true> ConstIterator;
993  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
994public:
995  typedef ptrdiff_t difference_type;
996  typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
997  typedef value_type *pointer;
998  typedef value_type &reference;
999  typedef std::forward_iterator_tag iterator_category;
1000private:
1001  pointer Ptr, End;
1002public:
1003  DenseMapIterator() : Ptr(0), End(0) {}
1004
1005  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
1006    : Ptr(Pos), End(E) {
1007    if (!NoAdvance) AdvancePastEmptyBuckets();
1008  }
1009
1010  // If IsConst is true, this is a converting constructor from iterator to
1011  // const_iterator and the default copy constructor is used.
1012  // Otherwise this is a copy constructor for iterator.
1013  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
1014                                          KeyInfoT, false>& I)
1015    : Ptr(I.Ptr), End(I.End) {}
1016
1017  reference operator*() const {
1018    return *Ptr;
1019  }
1020  pointer operator->() const {
1021    return Ptr;
1022  }
1023
1024  bool operator==(const ConstIterator &RHS) const {
1025    return Ptr == RHS.operator->();
1026  }
1027  bool operator!=(const ConstIterator &RHS) const {
1028    return Ptr != RHS.operator->();
1029  }
1030
1031  inline DenseMapIterator& operator++() {  // Preincrement
1032    ++Ptr;
1033    AdvancePastEmptyBuckets();
1034    return *this;
1035  }
1036  DenseMapIterator operator++(int) {  // Postincrement
1037    DenseMapIterator tmp = *this; ++*this; return tmp;
1038  }
1039
1040private:
1041  void AdvancePastEmptyBuckets() {
1042    const KeyT Empty = KeyInfoT::getEmptyKey();
1043    const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1044
1045    while (Ptr != End &&
1046           (KeyInfoT::isEqual(Ptr->first, Empty) ||
1047            KeyInfoT::isEqual(Ptr->first, Tombstone)))
1048      ++Ptr;
1049  }
1050};
1051
1052template<typename KeyT, typename ValueT, typename KeyInfoT>
1053static inline size_t
1054capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1055  return X.getMemorySize();
1056}
1057
1058} // end namespace llvm
1059
1060#endif
1061