// DenseMap.h revision 255cd6f317f3a0bad6e7939ca5ce49b33c6676f9
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the DenseMap class. 11// 12//===----------------------------------------------------------------------===// 13 14#ifndef LLVM_ADT_DENSEMAP_H 15#define LLVM_ADT_DENSEMAP_H 16 17#include "llvm/ADT/DenseMapInfo.h" 18#include "llvm/Support/AlignOf.h" 19#include "llvm/Support/Compiler.h" 20#include "llvm/Support/MathExtras.h" 21#include "llvm/Support/PointerLikeTypeTraits.h" 22#include "llvm/Support/type_traits.h" 23#include <algorithm> 24#include <cassert> 25#include <climits> 26#include <cstddef> 27#include <cstring> 28#include <iterator> 29#include <new> 30#include <utility> 31 32namespace llvm { 33 34template<typename KeyT, typename ValueT, 35 typename KeyInfoT = DenseMapInfo<KeyT>, 36 bool IsConst = false> 37class DenseMapIterator; 38 39template<typename DerivedT, 40 typename KeyT, typename ValueT, typename KeyInfoT> 41class DenseMapBase { 42protected: 43 typedef std::pair<KeyT, ValueT> BucketT; 44 45public: 46 typedef KeyT key_type; 47 typedef ValueT mapped_type; 48 typedef BucketT value_type; 49 50 typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator; 51 typedef DenseMapIterator<KeyT, ValueT, 52 KeyInfoT, true> const_iterator; 53 inline iterator begin() { 54 // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets(). 55 return empty() ? end() : iterator(getBuckets(), getBucketsEnd()); 56 } 57 inline iterator end() { 58 return iterator(getBucketsEnd(), getBucketsEnd(), true); 59 } 60 inline const_iterator begin() const { 61 return empty() ? 
end() : const_iterator(getBuckets(), getBucketsEnd()); 62 } 63 inline const_iterator end() const { 64 return const_iterator(getBucketsEnd(), getBucketsEnd(), true); 65 } 66 67 bool empty() const { return getNumEntries() == 0; } 68 unsigned size() const { return getNumEntries(); } 69 70 /// Grow the densemap so that it has at least Size buckets. Does not shrink 71 void resize(size_t Size) { 72 if (Size > getNumBuckets()) 73 grow(Size); 74 } 75 76 void clear() { 77 if (getNumEntries() == 0 && getNumTombstones() == 0) return; 78 79 // If the capacity of the array is huge, and the # elements used is small, 80 // shrink the array. 81 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { 82 shrink_and_clear(); 83 return; 84 } 85 86 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); 87 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { 88 if (!KeyInfoT::isEqual(P->first, EmptyKey)) { 89 if (!KeyInfoT::isEqual(P->first, TombstoneKey)) { 90 P->second.~ValueT(); 91 decrementNumEntries(); 92 } 93 P->first = EmptyKey; 94 } 95 } 96 assert(getNumEntries() == 0 && "Node count imbalance!"); 97 setNumTombstones(0); 98 } 99 100 /// count - Return true if the specified key is in the map. 101 bool count(const KeyT &Val) const { 102 const BucketT *TheBucket; 103 return LookupBucketFor(Val, TheBucket); 104 } 105 106 iterator find(const KeyT &Val) { 107 BucketT *TheBucket; 108 if (LookupBucketFor(Val, TheBucket)) 109 return iterator(TheBucket, getBucketsEnd(), true); 110 return end(); 111 } 112 const_iterator find(const KeyT &Val) const { 113 const BucketT *TheBucket; 114 if (LookupBucketFor(Val, TheBucket)) 115 return const_iterator(TheBucket, getBucketsEnd(), true); 116 return end(); 117 } 118 119 /// Alternate version of find() which allows a different, and possibly 120 /// less expensive, key type. 
121 /// The DenseMapInfo is responsible for supplying methods 122 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key 123 /// type used. 124 template<class LookupKeyT> 125 iterator find_as(const LookupKeyT &Val) { 126 BucketT *TheBucket; 127 if (LookupBucketFor(Val, TheBucket)) 128 return iterator(TheBucket, getBucketsEnd(), true); 129 return end(); 130 } 131 template<class LookupKeyT> 132 const_iterator find_as(const LookupKeyT &Val) const { 133 const BucketT *TheBucket; 134 if (LookupBucketFor(Val, TheBucket)) 135 return const_iterator(TheBucket, getBucketsEnd(), true); 136 return end(); 137 } 138 139 /// lookup - Return the entry for the specified key, or a default 140 /// constructed value if no such entry exists. 141 ValueT lookup(const KeyT &Val) const { 142 const BucketT *TheBucket; 143 if (LookupBucketFor(Val, TheBucket)) 144 return TheBucket->second; 145 return ValueT(); 146 } 147 148 // Inserts key,value pair into the map if the key isn't already in the map. 149 // If the key is already in the map, it returns false and doesn't update the 150 // value. 151 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { 152 BucketT *TheBucket; 153 if (LookupBucketFor(KV.first, TheBucket)) 154 return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), 155 false); // Already in map. 156 157 // Otherwise, insert the new element. 158 TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); 159 return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true); 160 } 161 162 /// insert - Range insertion of pairs. 163 template<typename InputIt> 164 void insert(InputIt I, InputIt E) { 165 for (; I != E; ++I) 166 insert(*I); 167 } 168 169 170 bool erase(const KeyT &Val) { 171 BucketT *TheBucket; 172 if (!LookupBucketFor(Val, TheBucket)) 173 return false; // not in map. 
174 175 TheBucket->second.~ValueT(); 176 TheBucket->first = getTombstoneKey(); 177 decrementNumEntries(); 178 incrementNumTombstones(); 179 return true; 180 } 181 void erase(iterator I) { 182 BucketT *TheBucket = &*I; 183 TheBucket->second.~ValueT(); 184 TheBucket->first = getTombstoneKey(); 185 decrementNumEntries(); 186 incrementNumTombstones(); 187 } 188 189 value_type& FindAndConstruct(const KeyT &Key) { 190 BucketT *TheBucket; 191 if (LookupBucketFor(Key, TheBucket)) 192 return *TheBucket; 193 194 return *InsertIntoBucket(Key, ValueT(), TheBucket); 195 } 196 197 ValueT &operator[](const KeyT &Key) { 198 return FindAndConstruct(Key).second; 199 } 200 201#if LLVM_HAS_RVALUE_REFERENCES 202 value_type& FindAndConstruct(KeyT &&Key) { 203 BucketT *TheBucket; 204 if (LookupBucketFor(Key, TheBucket)) 205 return *TheBucket; 206 207 return *InsertIntoBucket(Key, ValueT(), TheBucket); 208 } 209 210 ValueT &operator[](KeyT &&Key) { 211 return FindAndConstruct(Key).second; 212 } 213#endif 214 215 /// isPointerIntoBucketsArray - Return true if the specified pointer points 216 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or 217 /// value in the DenseMap). 218 bool isPointerIntoBucketsArray(const void *Ptr) const { 219 return Ptr >= getBuckets() && Ptr < getBucketsEnd(); 220 } 221 222 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets 223 /// array. In conjunction with the previous method, this can be used to 224 /// determine whether an insertion caused the DenseMap to reallocate. 225 const void *getPointerIntoBucketsArray() const { return getBuckets(); } 226 227protected: 228 DenseMapBase() {} 229 230 void destroyAll() { 231 if (getNumBuckets() == 0) // Nothing to do. 
232 return; 233 234 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); 235 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { 236 if (!KeyInfoT::isEqual(P->first, EmptyKey) && 237 !KeyInfoT::isEqual(P->first, TombstoneKey)) 238 P->second.~ValueT(); 239 P->first.~KeyT(); 240 } 241 242#ifndef NDEBUG 243 memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets()); 244#endif 245 } 246 247 void initEmpty() { 248 setNumEntries(0); 249 setNumTombstones(0); 250 251 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && 252 "# initial buckets must be a power of two!"); 253 const KeyT EmptyKey = getEmptyKey(); 254 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) 255 new (&B->first) KeyT(EmptyKey); 256 } 257 258 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { 259 initEmpty(); 260 261 // Insert all the old elements. 262 const KeyT EmptyKey = getEmptyKey(); 263 const KeyT TombstoneKey = getTombstoneKey(); 264 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { 265 if (!KeyInfoT::isEqual(B->first, EmptyKey) && 266 !KeyInfoT::isEqual(B->first, TombstoneKey)) { 267 // Insert the key/value into the new table. 268 BucketT *DestBucket; 269 bool FoundVal = LookupBucketFor(B->first, DestBucket); 270 (void)FoundVal; // silence warning. 271 assert(!FoundVal && "Key already in new map?"); 272 DestBucket->first = llvm_move(B->first); 273 new (&DestBucket->second) ValueT(llvm_move(B->second)); 274 incrementNumEntries(); 275 276 // Free the value. 
277 B->second.~ValueT(); 278 } 279 B->first.~KeyT(); 280 } 281 282#ifndef NDEBUG 283 if (OldBucketsBegin != OldBucketsEnd) 284 memset((void*)OldBucketsBegin, 0x5a, 285 sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin)); 286#endif 287 } 288 289 template <typename OtherBaseT> 290 void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) { 291 assert(getNumBuckets() == other.getNumBuckets()); 292 293 setNumEntries(other.getNumEntries()); 294 setNumTombstones(other.getNumTombstones()); 295 296 if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) 297 memcpy(getBuckets(), other.getBuckets(), 298 getNumBuckets() * sizeof(BucketT)); 299 else 300 for (size_t i = 0; i < getNumBuckets(); ++i) { 301 new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first); 302 if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) && 303 !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey())) 304 new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second); 305 } 306 } 307 308 void swap(DenseMapBase& RHS) { 309 std::swap(getNumEntries(), RHS.getNumEntries()); 310 std::swap(getNumTombstones(), RHS.getNumTombstones()); 311 } 312 313 static unsigned getHashValue(const KeyT &Val) { 314 return KeyInfoT::getHashValue(Val); 315 } 316 template<typename LookupKeyT> 317 static unsigned getHashValue(const LookupKeyT &Val) { 318 return KeyInfoT::getHashValue(Val); 319 } 320 static const KeyT getEmptyKey() { 321 return KeyInfoT::getEmptyKey(); 322 } 323 static const KeyT getTombstoneKey() { 324 return KeyInfoT::getTombstoneKey(); 325 } 326 327private: 328 unsigned getNumEntries() const { 329 return static_cast<const DerivedT *>(this)->getNumEntries(); 330 } 331 void setNumEntries(unsigned Num) { 332 static_cast<DerivedT *>(this)->setNumEntries(Num); 333 } 334 void incrementNumEntries() { 335 setNumEntries(getNumEntries() + 1); 336 } 337 void decrementNumEntries() { 338 setNumEntries(getNumEntries() - 1); 339 } 340 unsigned getNumTombstones() const { 341 
return static_cast<const DerivedT *>(this)->getNumTombstones(); 342 } 343 void setNumTombstones(unsigned Num) { 344 static_cast<DerivedT *>(this)->setNumTombstones(Num); 345 } 346 void incrementNumTombstones() { 347 setNumTombstones(getNumTombstones() + 1); 348 } 349 void decrementNumTombstones() { 350 setNumTombstones(getNumTombstones() - 1); 351 } 352 const BucketT *getBuckets() const { 353 return static_cast<const DerivedT *>(this)->getBuckets(); 354 } 355 BucketT *getBuckets() { 356 return static_cast<DerivedT *>(this)->getBuckets(); 357 } 358 unsigned getNumBuckets() const { 359 return static_cast<const DerivedT *>(this)->getNumBuckets(); 360 } 361 BucketT *getBucketsEnd() { 362 return getBuckets() + getNumBuckets(); 363 } 364 const BucketT *getBucketsEnd() const { 365 return getBuckets() + getNumBuckets(); 366 } 367 368 void grow(unsigned AtLeast) { 369 static_cast<DerivedT *>(this)->grow(AtLeast); 370 } 371 372 void shrink_and_clear() { 373 static_cast<DerivedT *>(this)->shrink_and_clear(); 374 } 375 376 377 BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value, 378 BucketT *TheBucket) { 379 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 380 381 TheBucket->first = Key; 382 new (&TheBucket->second) ValueT(Value); 383 return TheBucket; 384 } 385 386#if LLVM_HAS_RVALUE_REFERENCES 387 BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value, 388 BucketT *TheBucket) { 389 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 390 391 TheBucket->first = Key; 392 new (&TheBucket->second) ValueT(std::move(Value)); 393 return TheBucket; 394 } 395 396 BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) { 397 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 398 399 TheBucket->first = std::move(Key); 400 new (&TheBucket->second) ValueT(std::move(Value)); 401 return TheBucket; 402 } 403#endif 404 405 BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) { 406 // If the load of the hash table is more than 3/4, or if fewer than 
1/8 of 407 // the buckets are empty (meaning that many are filled with tombstones), 408 // grow the table. 409 // 410 // The later case is tricky. For example, if we had one empty bucket with 411 // tons of tombstones, failing lookups (e.g. for insertion) would have to 412 // probe almost the entire table until it found the empty bucket. If the 413 // table completely filled with tombstones, no lookup would ever succeed, 414 // causing infinite loops in lookup. 415 unsigned NewNumEntries = getNumEntries() + 1; 416 unsigned NumBuckets = getNumBuckets(); 417 if (NewNumEntries*4 >= NumBuckets*3) { 418 this->grow(NumBuckets * 2); 419 LookupBucketFor(Key, TheBucket); 420 NumBuckets = getNumBuckets(); 421 } 422 if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) { 423 this->grow(NumBuckets * 2); 424 LookupBucketFor(Key, TheBucket); 425 } 426 assert(TheBucket); 427 428 // Only update the state after we've grown our bucket space appropriately 429 // so that when growing buckets we have self-consistent entry count. 430 incrementNumEntries(); 431 432 // If we are writing over a tombstone, remember this. 433 const KeyT EmptyKey = getEmptyKey(); 434 if (!KeyInfoT::isEqual(TheBucket->first, EmptyKey)) 435 decrementNumTombstones(); 436 437 return TheBucket; 438 } 439 440 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in 441 /// FoundBucket. If the bucket contains the key and a value, this returns 442 /// true, otherwise it returns a bucket with an empty marker or tombstone and 443 /// returns false. 444 template<typename LookupKeyT> 445 bool LookupBucketFor(const LookupKeyT &Val, 446 const BucketT *&FoundBucket) const { 447 const BucketT *BucketsPtr = getBuckets(); 448 const unsigned NumBuckets = getNumBuckets(); 449 450 if (NumBuckets == 0) { 451 FoundBucket = 0; 452 return false; 453 } 454 455 // FoundTombstone - Keep track of whether we find a tombstone while probing. 
456 const BucketT *FoundTombstone = 0; 457 const KeyT EmptyKey = getEmptyKey(); 458 const KeyT TombstoneKey = getTombstoneKey(); 459 assert(!KeyInfoT::isEqual(Val, EmptyKey) && 460 !KeyInfoT::isEqual(Val, TombstoneKey) && 461 "Empty/Tombstone value shouldn't be inserted into map!"); 462 463 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); 464 unsigned ProbeAmt = 1; 465 while (1) { 466 const BucketT *ThisBucket = BucketsPtr + BucketNo; 467 // Found Val's bucket? If so, return it. 468 if (KeyInfoT::isEqual(Val, ThisBucket->first)) { 469 FoundBucket = ThisBucket; 470 return true; 471 } 472 473 // If we found an empty bucket, the key doesn't exist in the set. 474 // Insert it and return the default value. 475 if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { 476 // If we've already seen a tombstone while probing, fill it in instead 477 // of the empty bucket we eventually probed to. 478 if (FoundTombstone) ThisBucket = FoundTombstone; 479 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; 480 return false; 481 } 482 483 // If this is a tombstone, remember it. If Val ends up not in the map, we 484 // prefer to return it than something that would require more probing. 485 if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) 486 FoundTombstone = ThisBucket; // Remember the first tombstone found. 487 488 // Otherwise, it's a hash collision or a tombstone, continue quadratic 489 // probing. 490 BucketNo += ProbeAmt++; 491 BucketNo &= (NumBuckets-1); 492 } 493 } 494 495 template <typename LookupKeyT> 496 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { 497 const BucketT *ConstFoundBucket; 498 bool Result = const_cast<const DenseMapBase *>(this) 499 ->LookupBucketFor(Val, ConstFoundBucket); 500 FoundBucket = const_cast<BucketT *>(ConstFoundBucket); 501 return Result; 502 } 503 504public: 505 /// Return the approximate size (in bytes) of the actual map. 506 /// This is just the raw memory used by DenseMap. 
507 /// If entries are pointers to objects, the size of the referenced objects 508 /// are not included. 509 size_t getMemorySize() const { 510 return getNumBuckets() * sizeof(BucketT); 511 } 512}; 513 514template<typename KeyT, typename ValueT, 515 typename KeyInfoT = DenseMapInfo<KeyT> > 516class DenseMap 517 : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>, 518 KeyT, ValueT, KeyInfoT> { 519 // Lift some types from the dependent base class into this class for 520 // simplicity of referring to them. 521 typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT; 522 typedef typename BaseT::BucketT BucketT; 523 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>; 524 525 BucketT *Buckets; 526 unsigned NumEntries; 527 unsigned NumTombstones; 528 unsigned NumBuckets; 529 530public: 531 explicit DenseMap(unsigned NumInitBuckets = 0) { 532 init(NumInitBuckets); 533 } 534 535 DenseMap(const DenseMap &other) : BaseT() { 536 init(0); 537 copyFrom(other); 538 } 539 540#if LLVM_HAS_RVALUE_REFERENCES 541 DenseMap(DenseMap &&other) : BaseT() { 542 init(0); 543 swap(other); 544 } 545#endif 546 547 template<typename InputIt> 548 DenseMap(const InputIt &I, const InputIt &E) { 549 init(NextPowerOf2(std::distance(I, E))); 550 this->insert(I, E); 551 } 552 553 ~DenseMap() { 554 this->destroyAll(); 555 operator delete(Buckets); 556 } 557 558 void swap(DenseMap& RHS) { 559 std::swap(Buckets, RHS.Buckets); 560 std::swap(NumEntries, RHS.NumEntries); 561 std::swap(NumTombstones, RHS.NumTombstones); 562 std::swap(NumBuckets, RHS.NumBuckets); 563 } 564 565 DenseMap& operator=(const DenseMap& other) { 566 copyFrom(other); 567 return *this; 568 } 569 570#if LLVM_HAS_RVALUE_REFERENCES 571 DenseMap& operator=(DenseMap &&other) { 572 this->destroyAll(); 573 operator delete(Buckets); 574 init(0); 575 swap(other); 576 return *this; 577 } 578#endif 579 580 void copyFrom(const DenseMap& other) { 581 this->destroyAll(); 582 operator delete(Buckets); 583 if 
(allocateBuckets(other.NumBuckets)) { 584 this->BaseT::copyFrom(other); 585 } else { 586 NumEntries = 0; 587 NumTombstones = 0; 588 } 589 } 590 591 void init(unsigned InitBuckets) { 592 if (allocateBuckets(InitBuckets)) { 593 this->BaseT::initEmpty(); 594 } else { 595 NumEntries = 0; 596 NumTombstones = 0; 597 } 598 } 599 600 void grow(unsigned AtLeast) { 601 unsigned OldNumBuckets = NumBuckets; 602 BucketT *OldBuckets = Buckets; 603 604 allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast-1))); 605 assert(Buckets); 606 if (!OldBuckets) { 607 this->BaseT::initEmpty(); 608 return; 609 } 610 611 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); 612 613 // Free the old table. 614 operator delete(OldBuckets); 615 } 616 617 void shrink_and_clear() { 618 unsigned OldNumEntries = NumEntries; 619 this->destroyAll(); 620 621 // Reduce the number of buckets. 622 unsigned NewNumBuckets = 0; 623 if (OldNumEntries) 624 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); 625 if (NewNumBuckets == NumBuckets) { 626 this->BaseT::initEmpty(); 627 return; 628 } 629 630 operator delete(Buckets); 631 init(NewNumBuckets); 632 } 633 634private: 635 unsigned getNumEntries() const { 636 return NumEntries; 637 } 638 void setNumEntries(unsigned Num) { 639 NumEntries = Num; 640 } 641 642 unsigned getNumTombstones() const { 643 return NumTombstones; 644 } 645 void setNumTombstones(unsigned Num) { 646 NumTombstones = Num; 647 } 648 649 BucketT *getBuckets() const { 650 return Buckets; 651 } 652 653 unsigned getNumBuckets() const { 654 return NumBuckets; 655 } 656 657 bool allocateBuckets(unsigned Num) { 658 NumBuckets = Num; 659 if (NumBuckets == 0) { 660 Buckets = 0; 661 return false; 662 } 663 664 Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets)); 665 return true; 666 } 667}; 668 669template<typename KeyT, typename ValueT, 670 unsigned InlineBuckets = 4, 671 typename KeyInfoT = DenseMapInfo<KeyT> > 672class SmallDenseMap 673 : 
public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>, 674 KeyT, ValueT, KeyInfoT> { 675 // Lift some types from the dependent base class into this class for 676 // simplicity of referring to them. 677 typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT; 678 typedef typename BaseT::BucketT BucketT; 679 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>; 680 681 unsigned Small : 1; 682 unsigned NumEntries : 31; 683 unsigned NumTombstones; 684 685 struct LargeRep { 686 BucketT *Buckets; 687 unsigned NumBuckets; 688 }; 689 690 /// A "union" of an inline bucket array and the struct representing 691 /// a large bucket. This union will be discriminated by the 'Small' bit. 692 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage; 693 694public: 695 explicit SmallDenseMap(unsigned NumInitBuckets = 0) { 696 init(NumInitBuckets); 697 } 698 699 SmallDenseMap(const SmallDenseMap &other) { 700 init(0); 701 copyFrom(other); 702 } 703 704#if LLVM_HAS_RVALUE_REFERENCES 705 SmallDenseMap(SmallDenseMap &&other) { 706 init(0); 707 swap(other); 708 } 709#endif 710 711 template<typename InputIt> 712 SmallDenseMap(const InputIt &I, const InputIt &E) { 713 init(NextPowerOf2(std::distance(I, E))); 714 this->insert(I, E); 715 } 716 717 ~SmallDenseMap() { 718 this->destroyAll(); 719 deallocateBuckets(); 720 } 721 722 void swap(SmallDenseMap& RHS) { 723 unsigned TmpNumEntries = RHS.NumEntries; 724 RHS.NumEntries = NumEntries; 725 NumEntries = TmpNumEntries; 726 std::swap(NumTombstones, RHS.NumTombstones); 727 728 const KeyT EmptyKey = this->getEmptyKey(); 729 const KeyT TombstoneKey = this->getTombstoneKey(); 730 if (Small && RHS.Small) { 731 // If we're swapping inline bucket arrays, we have to cope with some of 732 // the tricky bits of DenseMap's storage system: the buckets are not 733 // fully initialized. Thus we swap every key, but we may have 734 // a one-directional move of the value. 
735 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { 736 BucketT *LHSB = &getInlineBuckets()[i], 737 *RHSB = &RHS.getInlineBuckets()[i]; 738 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) && 739 !KeyInfoT::isEqual(LHSB->first, TombstoneKey)); 740 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) && 741 !KeyInfoT::isEqual(RHSB->first, TombstoneKey)); 742 if (hasLHSValue && hasRHSValue) { 743 // Swap together if we can... 744 std::swap(*LHSB, *RHSB); 745 continue; 746 } 747 // Swap separately and handle any assymetry. 748 std::swap(LHSB->first, RHSB->first); 749 if (hasLHSValue) { 750 new (&RHSB->second) ValueT(llvm_move(LHSB->second)); 751 LHSB->second.~ValueT(); 752 } else if (hasRHSValue) { 753 new (&LHSB->second) ValueT(llvm_move(RHSB->second)); 754 RHSB->second.~ValueT(); 755 } 756 } 757 return; 758 } 759 if (!Small && !RHS.Small) { 760 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); 761 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); 762 return; 763 } 764 765 SmallDenseMap &SmallSide = Small ? *this : RHS; 766 SmallDenseMap &LargeSide = Small ? RHS : *this; 767 768 // First stash the large side's rep and move the small side across. 769 LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep()); 770 LargeSide.getLargeRep()->~LargeRep(); 771 LargeSide.Small = true; 772 // This is similar to the standard move-from-old-buckets, but the bucket 773 // count hasn't actually rotated in this case. So we have to carefully 774 // move construct the keys and values into their new locations, but there 775 // is no need to re-hash things. 
776 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { 777 BucketT *NewB = &LargeSide.getInlineBuckets()[i], 778 *OldB = &SmallSide.getInlineBuckets()[i]; 779 new (&NewB->first) KeyT(llvm_move(OldB->first)); 780 OldB->first.~KeyT(); 781 if (!KeyInfoT::isEqual(NewB->first, EmptyKey) && 782 !KeyInfoT::isEqual(NewB->first, TombstoneKey)) { 783 new (&NewB->second) ValueT(llvm_move(OldB->second)); 784 OldB->second.~ValueT(); 785 } 786 } 787 788 // The hard part of moving the small buckets across is done, just move 789 // the TmpRep into its new home. 790 SmallSide.Small = false; 791 new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep)); 792 } 793 794 SmallDenseMap& operator=(const SmallDenseMap& other) { 795 copyFrom(other); 796 return *this; 797 } 798 799#if LLVM_HAS_RVALUE_REFERENCES 800 SmallDenseMap& operator=(SmallDenseMap &&other) { 801 this->destroyAll(); 802 deallocateBuckets(); 803 init(0); 804 swap(other); 805 return *this; 806 } 807#endif 808 809 void copyFrom(const SmallDenseMap& other) { 810 this->destroyAll(); 811 deallocateBuckets(); 812 Small = true; 813 if (other.getNumBuckets() > InlineBuckets) { 814 Small = false; 815 allocateBuckets(other.getNumBuckets()); 816 } 817 this->BaseT::copyFrom(other); 818 } 819 820 void init(unsigned InitBuckets) { 821 Small = true; 822 if (InitBuckets > InlineBuckets) { 823 Small = false; 824 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); 825 } 826 this->BaseT::initEmpty(); 827 } 828 829 void grow(unsigned AtLeast) { 830 if (AtLeast >= InlineBuckets) 831 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1)); 832 833 if (Small) { 834 if (AtLeast < InlineBuckets) 835 return; // Nothing to do. 836 837 // First move the inline buckets into a temporary storage. 
838 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage; 839 BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer); 840 BucketT *TmpEnd = TmpBegin; 841 842 // Loop over the buckets, moving non-empty, non-tombstones into the 843 // temporary storage. Have the loop move the TmpEnd forward as it goes. 844 const KeyT EmptyKey = this->getEmptyKey(); 845 const KeyT TombstoneKey = this->getTombstoneKey(); 846 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { 847 if (!KeyInfoT::isEqual(P->first, EmptyKey) && 848 !KeyInfoT::isEqual(P->first, TombstoneKey)) { 849 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && 850 "Too many inline buckets!"); 851 new (&TmpEnd->first) KeyT(llvm_move(P->first)); 852 new (&TmpEnd->second) ValueT(llvm_move(P->second)); 853 ++TmpEnd; 854 P->second.~ValueT(); 855 } 856 P->first.~KeyT(); 857 } 858 859 // Now make this map use the large rep, and move all the entries back 860 // into it. 861 Small = false; 862 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); 863 this->moveFromOldBuckets(TmpBegin, TmpEnd); 864 return; 865 } 866 867 LargeRep OldRep = llvm_move(*getLargeRep()); 868 getLargeRep()->~LargeRep(); 869 if (AtLeast <= InlineBuckets) { 870 Small = true; 871 } else { 872 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); 873 } 874 875 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); 876 877 // Free the old table. 878 operator delete(OldRep.Buckets); 879 } 880 881 void shrink_and_clear() { 882 unsigned OldSize = this->size(); 883 this->destroyAll(); 884 885 // Reduce the number of buckets. 
886 unsigned NewNumBuckets = 0; 887 if (OldSize) { 888 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); 889 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) 890 NewNumBuckets = 64; 891 } 892 if ((Small && NewNumBuckets <= InlineBuckets) || 893 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { 894 this->BaseT::initEmpty(); 895 return; 896 } 897 898 deallocateBuckets(); 899 init(NewNumBuckets); 900 } 901 902private: 903 unsigned getNumEntries() const { 904 return NumEntries; 905 } 906 void setNumEntries(unsigned Num) { 907 assert(Num < INT_MAX && "Cannot support more than INT_MAX entries"); 908 NumEntries = Num; 909 } 910 911 unsigned getNumTombstones() const { 912 return NumTombstones; 913 } 914 void setNumTombstones(unsigned Num) { 915 NumTombstones = Num; 916 } 917 918 const BucketT *getInlineBuckets() const { 919 assert(Small); 920 // Note that this cast does not violate aliasing rules as we assert that 921 // the memory's dynamic type is the small, inline bucket buffer, and the 922 // 'storage.buffer' static type is 'char *'. 923 return reinterpret_cast<const BucketT *>(storage.buffer); 924 } 925 BucketT *getInlineBuckets() { 926 return const_cast<BucketT *>( 927 const_cast<const SmallDenseMap *>(this)->getInlineBuckets()); 928 } 929 const LargeRep *getLargeRep() const { 930 assert(!Small); 931 // Note, same rule about aliasing as with getInlineBuckets. 932 return reinterpret_cast<const LargeRep *>(storage.buffer); 933 } 934 LargeRep *getLargeRep() { 935 return const_cast<LargeRep *>( 936 const_cast<const SmallDenseMap *>(this)->getLargeRep()); 937 } 938 939 const BucketT *getBuckets() const { 940 return Small ? getInlineBuckets() : getLargeRep()->Buckets; 941 } 942 BucketT *getBuckets() { 943 return const_cast<BucketT *>( 944 const_cast<const SmallDenseMap *>(this)->getBuckets()); 945 } 946 unsigned getNumBuckets() const { 947 return Small ? 
InlineBuckets : getLargeRep()->NumBuckets; 948 } 949 950 void deallocateBuckets() { 951 if (Small) 952 return; 953 954 operator delete(getLargeRep()->Buckets); 955 getLargeRep()->~LargeRep(); 956 } 957 958 LargeRep allocateBuckets(unsigned Num) { 959 assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); 960 LargeRep Rep = { 961 static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num 962 }; 963 return Rep; 964 } 965}; 966 967template<typename KeyT, typename ValueT, 968 typename KeyInfoT, bool IsConst> 969class DenseMapIterator { 970 typedef std::pair<KeyT, ValueT> Bucket; 971 typedef DenseMapIterator<KeyT, ValueT, 972 KeyInfoT, true> ConstIterator; 973 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>; 974public: 975 typedef ptrdiff_t difference_type; 976 typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type; 977 typedef value_type *pointer; 978 typedef value_type &reference; 979 typedef std::forward_iterator_tag iterator_category; 980private: 981 pointer Ptr, End; 982public: 983 DenseMapIterator() : Ptr(0), End(0) {} 984 985 DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) 986 : Ptr(Pos), End(E) { 987 if (!NoAdvance) AdvancePastEmptyBuckets(); 988 } 989 990 // If IsConst is true this is a converting constructor from iterator to 991 // const_iterator and the default copy constructor is used. 992 // Otherwise this is a copy constructor for iterator. 
993 DenseMapIterator(const DenseMapIterator<KeyT, ValueT, 994 KeyInfoT, false>& I) 995 : Ptr(I.Ptr), End(I.End) {} 996 997 reference operator*() const { 998 return *Ptr; 999 } 1000 pointer operator->() const { 1001 return Ptr; 1002 } 1003 1004 bool operator==(const ConstIterator &RHS) const { 1005 return Ptr == RHS.operator->(); 1006 } 1007 bool operator!=(const ConstIterator &RHS) const { 1008 return Ptr != RHS.operator->(); 1009 } 1010 1011 inline DenseMapIterator& operator++() { // Preincrement 1012 ++Ptr; 1013 AdvancePastEmptyBuckets(); 1014 return *this; 1015 } 1016 DenseMapIterator operator++(int) { // Postincrement 1017 DenseMapIterator tmp = *this; ++*this; return tmp; 1018 } 1019 1020private: 1021 void AdvancePastEmptyBuckets() { 1022 const KeyT Empty = KeyInfoT::getEmptyKey(); 1023 const KeyT Tombstone = KeyInfoT::getTombstoneKey(); 1024 1025 while (Ptr != End && 1026 (KeyInfoT::isEqual(Ptr->first, Empty) || 1027 KeyInfoT::isEqual(Ptr->first, Tombstone))) 1028 ++Ptr; 1029 } 1030}; 1031 1032template<typename KeyT, typename ValueT, typename KeyInfoT> 1033static inline size_t 1034capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) { 1035 return X.getMemorySize(); 1036} 1037 1038} // end namespace llvm 1039 1040#endif 1041