DenseMap.h revision dd9d38d57bbd2161e04af90a9e03011afb039b16
//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include "llvm/ADT/DenseMapInfo.h"
#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>

namespace llvm {

template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT>,
         bool IsConst = false>
class DenseMapIterator;

/// DenseMapBase - Common table logic shared by DenseMap and SmallDenseMap.
/// The CRTP-derived class (DerivedT) owns the bucket storage and the
/// entry/tombstone counters; this class implements probing, insertion,
/// erasure and iteration on top of those accessors.
template<typename DerivedT,
         typename KeyT, typename ValueT, typename KeyInfoT>
class DenseMapBase {
protected:
  typedef std::pair<KeyT, ValueT> BucketT;

public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), true);
  }
  inline const_iterator begin() const {
    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  }

  bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it has at least Size buckets. Does not shrink
  void resize(size_t Size) {
    if (Size > getNumBuckets())
      grow(Size);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
          // Live entry: destroy the value and drop the entry count.
          P->second.~ValueT();
          decrementNumEntries();
        }
        // Reset both live and tombstone buckets to the empty marker.
        P->first = EmptyKey;
      }
    }
    assert(getNumEntries() == 0 && "Node count imbalance!");
    setNumTombstones(0);
  }

  /// count - Return true if the specified key is in the map.
  bool count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->second;
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.
    // Destroy the value and turn the bucket into a tombstone so probe
    // chains that pass through it remain intact.
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

#if LLVM_USE_RVALUE_REFERENCES
  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(Key).second;
  }
#endif

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() {}

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      // Values only exist in buckets holding a real entry; every bucket
      // (including empty and tombstone markers) holds a constructed KeyT.
      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
          !KeyInfoT::isEqual(P->first, TombstoneKey))
        P->second.~ValueT();
      P->first.~KeyT();
    }

#ifndef NDEBUG
    // Scribble on the dead buckets to make use-after-destroy visible.
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->first) KeyT(EmptyKey);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
          !KeyInfoT::isEqual(B->first, TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->first, DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->first = llvm_move(B->first);
        new (&DestBucket->second) ValueT(llvm_move(B->second));
        incrementNumEntries();

        // Free the value.
277 B->second.~ValueT(); 278 } 279 B->first.~KeyT(); 280 } 281 282#ifndef NDEBUG 283 if (OldBucketsBegin != OldBucketsEnd) 284 memset((void*)OldBucketsBegin, 0x5a, 285 sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin)); 286#endif 287 } 288 289 template <typename OtherBaseT> 290 void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) { 291 assert(getNumBuckets() == other.getNumBuckets()); 292 293 setNumEntries(other.getNumEntries()); 294 setNumTombstones(other.getNumTombstones()); 295 296 if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) 297 memcpy(getBuckets(), other.getBuckets(), 298 getNumBuckets() * sizeof(BucketT)); 299 else 300 for (size_t i = 0; i < getNumBuckets(); ++i) { 301 new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first); 302 if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) && 303 !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey())) 304 new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second); 305 } 306 } 307 308 void swap(DenseMapBase& RHS) { 309 std::swap(getNumEntries(), RHS.getNumEntries()); 310 std::swap(getNumTombstones(), RHS.getNumTombstones()); 311 } 312 313private: 314 static unsigned getHashValue(const KeyT &Val) { 315 return KeyInfoT::getHashValue(Val); 316 } 317 template<typename LookupKeyT> 318 static unsigned getHashValue(const LookupKeyT &Val) { 319 return KeyInfoT::getHashValue(Val); 320 } 321 static const KeyT getEmptyKey() { 322 return KeyInfoT::getEmptyKey(); 323 } 324 static const KeyT getTombstoneKey() { 325 return KeyInfoT::getTombstoneKey(); 326 } 327 328 unsigned getNumEntries() const { 329 return static_cast<const DerivedT *>(this)->getNumEntries(); 330 } 331 void setNumEntries(unsigned Num) { 332 static_cast<DerivedT *>(this)->setNumEntries(Num); 333 } 334 void incrementNumEntries() { 335 setNumEntries(getNumEntries() + 1); 336 } 337 void decrementNumEntries() { 338 setNumEntries(getNumEntries() - 1); 339 } 340 unsigned getNumTombstones() const { 341 
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  // InsertIntoBucket - Store Key/Value into TheBucket, growing and
  // re-probing first if needed (via InsertIntoBucketImpl).  Returns the
  // bucket actually written, which may differ from TheBucket if the table
  // grew.
  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(Value);
    return TheBucket;
  }

#if LLVM_USE_RVALUE_REFERENCES
  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = std::move(Key);
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }
#endif

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than
    // 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The later case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (NewNumEntries*4 >= NumBuckets*3) {
      this->grow(NumBuckets * 2);
      // Growing invalidated the probe result; find the slot again.
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    }
    if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
      // Grow to the same size: the rehash purges all tombstones.
      this->grow(NumBuckets);
      LookupBucketFor(Key, TheBucket);
    }

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey()))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    unsigned BucketNo = getHashValue(Val);
    unsigned ProbeAmt = 1;
    const BucketT *BucketsPtr = getBuckets();

    if (getNumBuckets() == 0) {
      FoundBucket = 0;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
455 const BucketT *FoundTombstone = 0; 456 const KeyT EmptyKey = getEmptyKey(); 457 const KeyT TombstoneKey = getTombstoneKey(); 458 assert(!KeyInfoT::isEqual(Val, EmptyKey) && 459 !KeyInfoT::isEqual(Val, TombstoneKey) && 460 "Empty/Tombstone value shouldn't be inserted into map!"); 461 462 while (1) { 463 const BucketT *ThisBucket = BucketsPtr + (BucketNo & (getNumBuckets()-1)); 464 // Found Val's bucket? If so, return it. 465 if (KeyInfoT::isEqual(Val, ThisBucket->first)) { 466 FoundBucket = ThisBucket; 467 return true; 468 } 469 470 // If we found an empty bucket, the key doesn't exist in the set. 471 // Insert it and return the default value. 472 if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { 473 // If we've already seen a tombstone while probing, fill it in instead 474 // of the empty bucket we eventually probed to. 475 if (FoundTombstone) ThisBucket = FoundTombstone; 476 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; 477 return false; 478 } 479 480 // If this is a tombstone, remember it. If Val ends up not in the map, we 481 // prefer to return it than something that would require more probing. 482 if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) 483 FoundTombstone = ThisBucket; // Remember the first tombstone found. 484 485 // Otherwise, it's a hash collision or a tombstone, continue quadratic 486 // probing. 487 BucketNo += ProbeAmt++; 488 } 489 } 490 491 template <typename LookupKeyT> 492 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { 493 const BucketT *ConstFoundBucket = FoundBucket; 494 bool Result = const_cast<const DenseMapBase *>(this) 495 ->LookupBucketFor(Val, ConstFoundBucket); 496 FoundBucket = const_cast<BucketT *>(ConstFoundBucket); 497 return Result; 498 } 499 500public: 501 /// Return the approximate size (in bytes) of the actual map. 502 /// This is just the raw memory used by DenseMap. 
503 /// If entries are pointers to objects, the size of the referenced objects 504 /// are not included. 505 size_t getMemorySize() const { 506 return getNumBuckets() * sizeof(BucketT); 507 } 508}; 509 510template<typename KeyT, typename ValueT, 511 typename KeyInfoT = DenseMapInfo<KeyT> > 512class DenseMap 513 : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>, 514 KeyT, ValueT, KeyInfoT> { 515 // Lift some types from the dependent base class into this class for 516 // simplicity of referring to them. 517 typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT; 518 typedef typename BaseT::BucketT BucketT; 519 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>; 520 521 BucketT *Buckets; 522 unsigned NumEntries; 523 unsigned NumTombstones; 524 unsigned NumBuckets; 525 526public: 527 explicit DenseMap(unsigned NumInitBuckets = 0) { 528 init(NumInitBuckets); 529 } 530 531 DenseMap(const DenseMap &other) { 532 init(0); 533 copyFrom(other); 534 } 535 536#if LLVM_USE_RVALUE_REFERENCES 537 DenseMap(DenseMap &&other) { 538 init(0); 539 swap(other); 540 } 541#endif 542 543 template<typename InputIt> 544 DenseMap(const InputIt &I, const InputIt &E) { 545 init(NextPowerOf2(std::distance(I, E))); 546 this->insert(I, E); 547 } 548 549 ~DenseMap() { 550 this->destroyAll(); 551 operator delete(Buckets); 552 } 553 554 void swap(DenseMap& RHS) { 555 std::swap(Buckets, RHS.Buckets); 556 std::swap(NumEntries, RHS.NumEntries); 557 std::swap(NumTombstones, RHS.NumTombstones); 558 std::swap(NumBuckets, RHS.NumBuckets); 559 } 560 561 DenseMap& operator=(const DenseMap& other) { 562 copyFrom(other); 563 return *this; 564 } 565 566#if LLVM_USE_RVALUE_REFERENCES 567 DenseMap& operator=(DenseMap &&other) { 568 this->destroyAll(); 569 operator delete(Buckets); 570 init(0); 571 swap(other); 572 return *this; 573 } 574#endif 575 576 void copyFrom(const DenseMap& other) { 577 this->destroyAll(); 578 operator delete(Buckets); 579 if (allocateBuckets(other.NumBuckets)) { 580 
      this->BaseT::copyFrom(other);
    } else {
      // Zero-bucket source: no storage was allocated.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      // Zero-bucket map: leave the counters consistent with no storage.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    // Always grow to at least 64 buckets, rounded up to a power of two.
    allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast)));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets
      = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  // Accessors required by the DenseMapBase CRTP interface.
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  // Returns false (and sets Buckets to null) when Num is zero; the buckets
  // are allocated raw and keys are constructed later by initEmpty()/copyFrom().
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = 0;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

template<typename KeyT, typename ValueT,
         unsigned InlineBuckets = 4,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class SmallDenseMap
    : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
                          KeyT,
ValueT, KeyInfoT> { 670 // Lift some types from the dependent base class into this class for 671 // simplicity of referring to them. 672 typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT; 673 typedef typename BaseT::BucketT BucketT; 674 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>; 675 676 unsigned Small : 1; 677 unsigned NumEntries : 31; 678 unsigned NumTombstones; 679 680 struct LargeRep { 681 BucketT *Buckets; 682 unsigned NumBuckets; 683 }; 684 685 /// A "union" of an inline bucket array and the struct representing 686 /// a large bucket. This union will be discriminated by the 'Small' bit. 687 typename AlignedCharArray<BucketT[InlineBuckets], LargeRep>::union_type 688 storage; 689 690public: 691 explicit SmallDenseMap(unsigned NumInitBuckets = 0) { 692 init(NumInitBuckets); 693 } 694 695 SmallDenseMap(const SmallDenseMap &other) { 696 init(0); 697 copyFrom(other); 698 } 699 700#if LLVM_USE_RVALUE_REFERENCES 701 SmallDenseMap(SmallDenseMap &&other) { 702 init(0); 703 swap(other); 704 } 705#endif 706 707 template<typename InputIt> 708 SmallDenseMap(const InputIt &I, const InputIt &E) { 709 init(NextPowerOf2(std::distance(I, E))); 710 this->insert(I, E); 711 } 712 713 ~SmallDenseMap() { 714 this->destroyAll(); 715 deallocateBuckets(); 716 } 717 718 void swap(SmallDenseMap& RHS) { 719 std::swap(NumEntries, RHS.NumEntries); 720 std::swap(NumTombstones, RHS.NumTombstones); 721 if (Small && RHS.Small) { 722 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) 723 std::swap(getInlineBuckets()[i], RHS.getInlineBuckes()[i]); 724 return; 725 } 726 if (!Small && !RHS.Small) { 727 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); 728 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); 729 return; 730 } 731 732 SmallDenseMap &SmallSide = Small ? *this : RHS; 733 SmallDenseMap &LargeSide = Small ? RHS : *this; 734 735 // First stash the large side's rep and move the small side across. 
736 LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep()); 737 LargeSide.getLargeRep()->~LargeRep(); 738 LargeSide.Small = true; 739 // This is similar to the standard move-from-old-buckets, but the bucket 740 // count hasn't actually rotate in this case. So we have to carefully 741 // move construct the keys and values into their new locations, but there 742 // is no need to re-hash things. 743 const KeyT EmptyKey = this->getEmptyKey(); 744 const KeyT TombstoneKey = this->getTombstoneKey(); 745 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { 746 BucketT *NewB = &LargeSide.getInlineBuckets()[i], 747 *OldB = &SmallSide.getInlineBuckets()[i]; 748 new (&NewB->first) KeyT(llvm_move(OldB->first)); 749 NewB->first.~KeyT(); 750 if (!KeyInfoT::isEqual(NewB->first, EmptyKey) && 751 !KeyInfoT::isEqual(NewB->first, TombstoneKey)) { 752 new (&NewB->second) ValueT(llvm_move(OldB->second)); 753 OldB->second.~ValueT(); 754 } 755 } 756 757 // The hard part of moving the small buckets across is done, just move 758 // the TmpRep into its new home. 
759 SmallSide.Small = false; 760 new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep)); 761 } 762 763 SmallDenseMap& operator=(const SmallDenseMap& other) { 764 copyFrom(other); 765 return *this; 766 } 767 768#if LLVM_USE_RVALUE_REFERENCES 769 SmallDenseMap& operator=(SmallDenseMap &&other) { 770 this->destroyAll(); 771 deallocateBuckets(); 772 init(0); 773 swap(other); 774 return *this; 775 } 776#endif 777 778 void copyFrom(const SmallDenseMap& other) { 779 this->destroyAll(); 780 deallocateBuckets(); 781 Small = true; 782 if (other.getNumBuckets() > InlineBuckets) { 783 Small = false; 784 allocateBuckets(other.getNumBuckets()); 785 } 786 this->BaseT::copyFrom(other); 787 } 788 789 void init(unsigned InitBuckets) { 790 Small = true; 791 if (InitBuckets > InlineBuckets) { 792 Small = false; 793 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); 794 } 795 this->BaseT::initEmpty(); 796 } 797 798 void grow(unsigned AtLeast) { 799 if (AtLeast > InlineBuckets) 800 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast)); 801 802 if (Small) { 803 if (AtLeast <= InlineBuckets) 804 return; // Nothing to do. 805 806 // First grow an allocated bucket array in another map and move our 807 // entries into it. 808 // FIXME: This is wasteful, we don't need the inline buffer here, and we 809 // certainly don't need to initialize it to empty. 810 SmallDenseMap TmpMap; 811 TmpMap.Small = false; 812 new (TmpMap.getLargeRep()) LargeRep(allocateBuckets(AtLeast)); 813 TmpMap.moveFromOldBuckets(getInlineBuckets(), 814 getInlineBuckets()+InlineBuckets); 815 816 // Now steal the innards back into this map, and arrange for the 817 // temporary map to be cleanly torn down. 
      assert(NumEntries == TmpMap.NumEntries);
      Small = false;
      NumTombstones = llvm_move(TmpMap.NumTombstones);
      new (getLargeRep()) LargeRep(llvm_move(*TmpMap.getLargeRep()));
      // Flip TmpMap back to the (empty) small representation so its
      // destructor does not free the buckets we just stole.
      TmpMap.getLargeRep()->~LargeRep();
      TmpMap.Small = true;
      return;
    }

    LargeRep OldRep = llvm_move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      // Already at the target size; just reset the buckets to empty.
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  // Accessors required by the DenseMapBase CRTP interface.
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    // NumEntries is a 31-bit bit-field; reject counts that would not fit.
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  // Dispatch on the 'Small' discriminator bit to find the active buckets.
  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  // Allocates a heap bucket array and returns the rep by value; the caller
  // is responsible for placement-constructing it into 'storage'.
  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

template<typename KeyT, typename ValueT,
         typename KeyInfoT, bool IsConst>
class DenseMapIterator {
  typedef std::pair<KeyT, ValueT> Bucket;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
  typedef ptrdiff_t difference_type;
  typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(0), End(0) {}

  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance =
                   false)
    : Ptr(Pos), End(E) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // If IsConst is true this is a converting constructor from iterator to
  // const_iterator and the default copy constructor is used.
  // Otherwise this is a copy constructor for iterator.
  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
                                          KeyInfoT, false>& I)
    : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.operator->();
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.operator->();
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  // Skip forward to the next bucket holding a real entry (neither the empty
  // marker nor a tombstone), stopping at End.
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End &&
           (KeyInfoT::isEqual(Ptr->first, Empty) ||
            KeyInfoT::isEqual(Ptr->first, Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif