// DenseMap.h -- revision ac24e251014de60a16558fc0a1f2340c334d2aa8
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the DenseMap class. 11// 12//===----------------------------------------------------------------------===// 13 14#ifndef LLVM_ADT_DENSEMAP_H 15#define LLVM_ADT_DENSEMAP_H 16 17#include "llvm/Support/Compiler.h" 18#include "llvm/Support/AlignOf.h" 19#include "llvm/Support/MathExtras.h" 20#include "llvm/Support/PointerLikeTypeTraits.h" 21#include "llvm/Support/type_traits.h" 22#include "llvm/ADT/DenseMapInfo.h" 23#include <algorithm> 24#include <iterator> 25#include <new> 26#include <utility> 27#include <cassert> 28#include <climits> 29#include <cstddef> 30#include <cstring> 31 32namespace llvm { 33 34template<typename KeyT, typename ValueT, 35 typename KeyInfoT = DenseMapInfo<KeyT>, 36 bool IsConst = false> 37class DenseMapIterator; 38 39template<typename DerivedT, 40 typename KeyT, typename ValueT, typename KeyInfoT> 41class DenseMapBase { 42protected: 43 typedef std::pair<KeyT, ValueT> BucketT; 44 45public: 46 typedef KeyT key_type; 47 typedef ValueT mapped_type; 48 typedef BucketT value_type; 49 50 typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator; 51 typedef DenseMapIterator<KeyT, ValueT, 52 KeyInfoT, true> const_iterator; 53 inline iterator begin() { 54 // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets(). 55 return empty() ? end() : iterator(getBuckets(), getBucketsEnd()); 56 } 57 inline iterator end() { 58 return iterator(getBucketsEnd(), getBucketsEnd(), true); 59 } 60 inline const_iterator begin() const { 61 return empty() ? 
end() : const_iterator(getBuckets(), getBucketsEnd()); 62 } 63 inline const_iterator end() const { 64 return const_iterator(getBucketsEnd(), getBucketsEnd(), true); 65 } 66 67 bool empty() const { return getNumEntries() == 0; } 68 unsigned size() const { return getNumEntries(); } 69 70 /// Grow the densemap so that it has at least Size buckets. Does not shrink 71 void resize(size_t Size) { 72 if (Size > getNumBuckets()) 73 grow(Size); 74 } 75 76 void clear() { 77 if (getNumEntries() == 0 && getNumTombstones() == 0) return; 78 79 // If the capacity of the array is huge, and the # elements used is small, 80 // shrink the array. 81 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { 82 shrink_and_clear(); 83 return; 84 } 85 86 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); 87 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { 88 if (!KeyInfoT::isEqual(P->first, EmptyKey)) { 89 if (!KeyInfoT::isEqual(P->first, TombstoneKey)) { 90 P->second.~ValueT(); 91 decrementNumEntries(); 92 } 93 P->first = EmptyKey; 94 } 95 } 96 assert(getNumEntries() == 0 && "Node count imbalance!"); 97 setNumTombstones(0); 98 } 99 100 /// count - Return true if the specified key is in the map. 101 bool count(const KeyT &Val) const { 102 const BucketT *TheBucket; 103 return LookupBucketFor(Val, TheBucket); 104 } 105 106 iterator find(const KeyT &Val) { 107 BucketT *TheBucket; 108 if (LookupBucketFor(Val, TheBucket)) 109 return iterator(TheBucket, getBucketsEnd(), true); 110 return end(); 111 } 112 const_iterator find(const KeyT &Val) const { 113 const BucketT *TheBucket; 114 if (LookupBucketFor(Val, TheBucket)) 115 return const_iterator(TheBucket, getBucketsEnd(), true); 116 return end(); 117 } 118 119 /// Alternate version of find() which allows a different, and possibly 120 /// less expensive, key type. 
121 /// The DenseMapInfo is responsible for supplying methods 122 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key 123 /// type used. 124 template<class LookupKeyT> 125 iterator find_as(const LookupKeyT &Val) { 126 BucketT *TheBucket; 127 if (LookupBucketFor(Val, TheBucket)) 128 return iterator(TheBucket, getBucketsEnd(), true); 129 return end(); 130 } 131 template<class LookupKeyT> 132 const_iterator find_as(const LookupKeyT &Val) const { 133 const BucketT *TheBucket; 134 if (LookupBucketFor(Val, TheBucket)) 135 return const_iterator(TheBucket, getBucketsEnd(), true); 136 return end(); 137 } 138 139 /// lookup - Return the entry for the specified key, or a default 140 /// constructed value if no such entry exists. 141 ValueT lookup(const KeyT &Val) const { 142 const BucketT *TheBucket; 143 if (LookupBucketFor(Val, TheBucket)) 144 return TheBucket->second; 145 return ValueT(); 146 } 147 148 // Inserts key,value pair into the map if the key isn't already in the map. 149 // If the key is already in the map, it returns false and doesn't update the 150 // value. 151 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { 152 BucketT *TheBucket; 153 if (LookupBucketFor(KV.first, TheBucket)) 154 return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), 155 false); // Already in map. 156 157 // Otherwise, insert the new element. 158 TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); 159 return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true); 160 } 161 162 /// insert - Range insertion of pairs. 163 template<typename InputIt> 164 void insert(InputIt I, InputIt E) { 165 for (; I != E; ++I) 166 insert(*I); 167 } 168 169 170 bool erase(const KeyT &Val) { 171 BucketT *TheBucket; 172 if (!LookupBucketFor(Val, TheBucket)) 173 return false; // not in map. 
174 175 TheBucket->second.~ValueT(); 176 TheBucket->first = getTombstoneKey(); 177 decrementNumEntries(); 178 incrementNumTombstones(); 179 return true; 180 } 181 void erase(iterator I) { 182 BucketT *TheBucket = &*I; 183 TheBucket->second.~ValueT(); 184 TheBucket->first = getTombstoneKey(); 185 decrementNumEntries(); 186 incrementNumTombstones(); 187 } 188 189 value_type& FindAndConstruct(const KeyT &Key) { 190 BucketT *TheBucket; 191 if (LookupBucketFor(Key, TheBucket)) 192 return *TheBucket; 193 194 return *InsertIntoBucket(Key, ValueT(), TheBucket); 195 } 196 197 ValueT &operator[](const KeyT &Key) { 198 return FindAndConstruct(Key).second; 199 } 200 201#if LLVM_USE_RVALUE_REFERENCES 202 value_type& FindAndConstruct(KeyT &&Key) { 203 BucketT *TheBucket; 204 if (LookupBucketFor(Key, TheBucket)) 205 return *TheBucket; 206 207 return *InsertIntoBucket(Key, ValueT(), TheBucket); 208 } 209 210 ValueT &operator[](KeyT &&Key) { 211 return FindAndConstruct(Key).second; 212 } 213#endif 214 215 /// isPointerIntoBucketsArray - Return true if the specified pointer points 216 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or 217 /// value in the DenseMap). 218 bool isPointerIntoBucketsArray(const void *Ptr) const { 219 return Ptr >= getBuckets() && Ptr < getBucketsEnd(); 220 } 221 222 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets 223 /// array. In conjunction with the previous method, this can be used to 224 /// determine whether an insertion caused the DenseMap to reallocate. 225 const void *getPointerIntoBucketsArray() const { return getBuckets(); } 226 227protected: 228 DenseMapBase() {} 229 230 void destroyAll() { 231 if (getNumBuckets() == 0) // Nothing to do. 
232 return; 233 234 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); 235 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { 236 if (!KeyInfoT::isEqual(P->first, EmptyKey) && 237 !KeyInfoT::isEqual(P->first, TombstoneKey)) 238 P->second.~ValueT(); 239 P->first.~KeyT(); 240 } 241 242#ifndef NDEBUG 243 memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets()); 244#endif 245 } 246 247 void initEmpty() { 248 setNumEntries(0); 249 setNumTombstones(0); 250 251 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && 252 "# initial buckets must be a power of two!"); 253 const KeyT EmptyKey = getEmptyKey(); 254 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) 255 new (&B->first) KeyT(EmptyKey); 256 } 257 258 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { 259 initEmpty(); 260 261 // Insert all the old elements. 262 const KeyT EmptyKey = getEmptyKey(); 263 const KeyT TombstoneKey = getTombstoneKey(); 264 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { 265 if (!KeyInfoT::isEqual(B->first, EmptyKey) && 266 !KeyInfoT::isEqual(B->first, TombstoneKey)) { 267 // Insert the key/value into the new table. 268 BucketT *DestBucket; 269 bool FoundVal = LookupBucketFor(B->first, DestBucket); 270 (void)FoundVal; // silence warning. 271 assert(!FoundVal && "Key already in new map?"); 272 DestBucket->first = llvm_move(B->first); 273 new (&DestBucket->second) ValueT(llvm_move(B->second)); 274 incrementNumEntries(); 275 276 // Free the value. 
277 B->second.~ValueT(); 278 } 279 B->first.~KeyT(); 280 } 281 282#ifndef NDEBUG 283 if (OldBucketsBegin != OldBucketsEnd) 284 memset((void*)OldBucketsBegin, 0x5a, 285 sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin)); 286#endif 287 } 288 289 template <typename OtherBaseT> 290 void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) { 291 assert(getNumBuckets() == other.getNumBuckets()); 292 293 setNumEntries(other.getNumEntries()); 294 setNumTombstones(other.getNumTombstones()); 295 296 if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) 297 memcpy(getBuckets(), other.getBuckets(), 298 getNumBuckets() * sizeof(BucketT)); 299 else 300 for (size_t i = 0; i < getNumBuckets(); ++i) { 301 new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first); 302 if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) && 303 !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey())) 304 new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second); 305 } 306 } 307 308 void swap(DenseMapBase& RHS) { 309 std::swap(getNumEntries(), RHS.getNumEntries()); 310 std::swap(getNumTombstones(), RHS.getNumTombstones()); 311 } 312 313 static unsigned getHashValue(const KeyT &Val) { 314 return KeyInfoT::getHashValue(Val); 315 } 316 template<typename LookupKeyT> 317 static unsigned getHashValue(const LookupKeyT &Val) { 318 return KeyInfoT::getHashValue(Val); 319 } 320 static const KeyT getEmptyKey() { 321 return KeyInfoT::getEmptyKey(); 322 } 323 static const KeyT getTombstoneKey() { 324 return KeyInfoT::getTombstoneKey(); 325 } 326 327private: 328 unsigned getNumEntries() const { 329 return static_cast<const DerivedT *>(this)->getNumEntries(); 330 } 331 void setNumEntries(unsigned Num) { 332 static_cast<DerivedT *>(this)->setNumEntries(Num); 333 } 334 void incrementNumEntries() { 335 setNumEntries(getNumEntries() + 1); 336 } 337 void decrementNumEntries() { 338 setNumEntries(getNumEntries() - 1); 339 } 340 unsigned getNumTombstones() const { 341 
return static_cast<const DerivedT *>(this)->getNumTombstones(); 342 } 343 void setNumTombstones(unsigned Num) { 344 static_cast<DerivedT *>(this)->setNumTombstones(Num); 345 } 346 void incrementNumTombstones() { 347 setNumTombstones(getNumTombstones() + 1); 348 } 349 void decrementNumTombstones() { 350 setNumTombstones(getNumTombstones() - 1); 351 } 352 const BucketT *getBuckets() const { 353 return static_cast<const DerivedT *>(this)->getBuckets(); 354 } 355 BucketT *getBuckets() { 356 return static_cast<DerivedT *>(this)->getBuckets(); 357 } 358 unsigned getNumBuckets() const { 359 return static_cast<const DerivedT *>(this)->getNumBuckets(); 360 } 361 BucketT *getBucketsEnd() { 362 return getBuckets() + getNumBuckets(); 363 } 364 const BucketT *getBucketsEnd() const { 365 return getBuckets() + getNumBuckets(); 366 } 367 368 void grow(unsigned AtLeast) { 369 static_cast<DerivedT *>(this)->grow(AtLeast); 370 } 371 372 void shrink_and_clear() { 373 static_cast<DerivedT *>(this)->shrink_and_clear(); 374 } 375 376 377 BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value, 378 BucketT *TheBucket) { 379 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 380 381 TheBucket->first = Key; 382 new (&TheBucket->second) ValueT(Value); 383 return TheBucket; 384 } 385 386#if LLVM_USE_RVALUE_REFERENCES 387 BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value, 388 BucketT *TheBucket) { 389 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 390 391 TheBucket->first = Key; 392 new (&TheBucket->second) ValueT(std::move(Value)); 393 return TheBucket; 394 } 395 396 BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) { 397 TheBucket = InsertIntoBucketImpl(Key, TheBucket); 398 399 TheBucket->first = std::move(Key); 400 new (&TheBucket->second) ValueT(std::move(Value)); 401 return TheBucket; 402 } 403#endif 404 405 BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) { 406 // If the load of the hash table is more than 3/4, or if fewer than 
1/8 of 407 // the buckets are empty (meaning that many are filled with tombstones), 408 // grow the table. 409 // 410 // The later case is tricky. For example, if we had one empty bucket with 411 // tons of tombstones, failing lookups (e.g. for insertion) would have to 412 // probe almost the entire table until it found the empty bucket. If the 413 // table completely filled with tombstones, no lookup would ever succeed, 414 // causing infinite loops in lookup. 415 unsigned NewNumEntries = getNumEntries() + 1; 416 unsigned NumBuckets = getNumBuckets(); 417 if (NewNumEntries*4 >= NumBuckets*3) { 418 this->grow(NumBuckets * 2); 419 LookupBucketFor(Key, TheBucket); 420 NumBuckets = getNumBuckets(); 421 } 422 if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) { 423 this->grow(NumBuckets); 424 LookupBucketFor(Key, TheBucket); 425 } 426 427 // Only update the state after we've grown our bucket space appropriately 428 // so that when growing buckets we have self-consistent entry count. 429 incrementNumEntries(); 430 431 // If we are writing over a tombstone, remember this. 432 if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) 433 decrementNumTombstones(); 434 435 return TheBucket; 436 } 437 438 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in 439 /// FoundBucket. If the bucket contains the key and a value, this returns 440 /// true, otherwise it returns a bucket with an empty marker or tombstone and 441 /// returns false. 442 template<typename LookupKeyT> 443 bool LookupBucketFor(const LookupKeyT &Val, 444 const BucketT *&FoundBucket) const { 445 unsigned BucketNo = getHashValue(Val); 446 unsigned ProbeAmt = 1; 447 const BucketT *BucketsPtr = getBuckets(); 448 449 if (getNumBuckets() == 0) { 450 FoundBucket = 0; 451 return false; 452 } 453 454 // FoundTombstone - Keep track of whether we find a tombstone while probing. 
455 const BucketT *FoundTombstone = 0; 456 const KeyT EmptyKey = getEmptyKey(); 457 const KeyT TombstoneKey = getTombstoneKey(); 458 assert(!KeyInfoT::isEqual(Val, EmptyKey) && 459 !KeyInfoT::isEqual(Val, TombstoneKey) && 460 "Empty/Tombstone value shouldn't be inserted into map!"); 461 462 while (1) { 463 const BucketT *ThisBucket = BucketsPtr + (BucketNo & (getNumBuckets()-1)); 464 // Found Val's bucket? If so, return it. 465 if (KeyInfoT::isEqual(Val, ThisBucket->first)) { 466 FoundBucket = ThisBucket; 467 return true; 468 } 469 470 // If we found an empty bucket, the key doesn't exist in the set. 471 // Insert it and return the default value. 472 if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { 473 // If we've already seen a tombstone while probing, fill it in instead 474 // of the empty bucket we eventually probed to. 475 if (FoundTombstone) ThisBucket = FoundTombstone; 476 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; 477 return false; 478 } 479 480 // If this is a tombstone, remember it. If Val ends up not in the map, we 481 // prefer to return it than something that would require more probing. 482 if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) 483 FoundTombstone = ThisBucket; // Remember the first tombstone found. 484 485 // Otherwise, it's a hash collision or a tombstone, continue quadratic 486 // probing. 487 BucketNo += ProbeAmt++; 488 } 489 } 490 491 template <typename LookupKeyT> 492 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { 493 const BucketT *ConstFoundBucket; 494 bool Result = const_cast<const DenseMapBase *>(this) 495 ->LookupBucketFor(Val, ConstFoundBucket); 496 FoundBucket = const_cast<BucketT *>(ConstFoundBucket); 497 return Result; 498 } 499 500public: 501 /// Return the approximate size (in bytes) of the actual map. 502 /// This is just the raw memory used by DenseMap. 503 /// If entries are pointers to objects, the size of the referenced objects 504 /// are not included. 
505 size_t getMemorySize() const { 506 return getNumBuckets() * sizeof(BucketT); 507 } 508}; 509 510template<typename KeyT, typename ValueT, 511 typename KeyInfoT = DenseMapInfo<KeyT> > 512class DenseMap 513 : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>, 514 KeyT, ValueT, KeyInfoT> { 515 // Lift some types from the dependent base class into this class for 516 // simplicity of referring to them. 517 typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT; 518 typedef typename BaseT::BucketT BucketT; 519 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>; 520 521 BucketT *Buckets; 522 unsigned NumEntries; 523 unsigned NumTombstones; 524 unsigned NumBuckets; 525 526public: 527 explicit DenseMap(unsigned NumInitBuckets = 0) { 528 init(NumInitBuckets); 529 } 530 531 DenseMap(const DenseMap &other) { 532 init(0); 533 copyFrom(other); 534 } 535 536#if LLVM_USE_RVALUE_REFERENCES 537 DenseMap(DenseMap &&other) { 538 init(0); 539 swap(other); 540 } 541#endif 542 543 template<typename InputIt> 544 DenseMap(const InputIt &I, const InputIt &E) { 545 init(NextPowerOf2(std::distance(I, E))); 546 this->insert(I, E); 547 } 548 549 ~DenseMap() { 550 this->destroyAll(); 551 operator delete(Buckets); 552 } 553 554 void swap(DenseMap& RHS) { 555 std::swap(Buckets, RHS.Buckets); 556 std::swap(NumEntries, RHS.NumEntries); 557 std::swap(NumTombstones, RHS.NumTombstones); 558 std::swap(NumBuckets, RHS.NumBuckets); 559 } 560 561 DenseMap& operator=(const DenseMap& other) { 562 copyFrom(other); 563 return *this; 564 } 565 566#if LLVM_USE_RVALUE_REFERENCES 567 DenseMap& operator=(DenseMap &&other) { 568 this->destroyAll(); 569 operator delete(Buckets); 570 init(0); 571 swap(other); 572 return *this; 573 } 574#endif 575 576 void copyFrom(const DenseMap& other) { 577 this->destroyAll(); 578 operator delete(Buckets); 579 if (allocateBuckets(other.NumBuckets)) { 580 this->BaseT::copyFrom(other); 581 } else { 582 NumEntries = 0; 583 NumTombstones = 0; 584 } 585 } 586 587 
void init(unsigned InitBuckets) { 588 if (allocateBuckets(InitBuckets)) { 589 this->BaseT::initEmpty(); 590 } else { 591 NumEntries = 0; 592 NumTombstones = 0; 593 } 594 } 595 596 void grow(unsigned AtLeast) { 597 unsigned OldNumBuckets = NumBuckets; 598 BucketT *OldBuckets = Buckets; 599 600 allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast))); 601 assert(Buckets); 602 if (!OldBuckets) { 603 this->BaseT::initEmpty(); 604 return; 605 } 606 607 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); 608 609 // Free the old table. 610 operator delete(OldBuckets); 611 } 612 613 void shrink_and_clear() { 614 unsigned OldNumEntries = NumEntries; 615 this->destroyAll(); 616 617 // Reduce the number of buckets. 618 unsigned NewNumBuckets 619 = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); 620 if (NewNumBuckets == NumBuckets) { 621 this->BaseT::initEmpty(); 622 return; 623 } 624 625 operator delete(Buckets); 626 init(NewNumBuckets); 627 } 628 629private: 630 unsigned getNumEntries() const { 631 return NumEntries; 632 } 633 void setNumEntries(unsigned Num) { 634 NumEntries = Num; 635 } 636 637 unsigned getNumTombstones() const { 638 return NumTombstones; 639 } 640 void setNumTombstones(unsigned Num) { 641 NumTombstones = Num; 642 } 643 644 BucketT *getBuckets() const { 645 return Buckets; 646 } 647 648 unsigned getNumBuckets() const { 649 return NumBuckets; 650 } 651 652 bool allocateBuckets(unsigned Num) { 653 NumBuckets = Num; 654 if (NumBuckets == 0) { 655 Buckets = 0; 656 return false; 657 } 658 659 Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets)); 660 return true; 661 } 662}; 663 664template<typename KeyT, typename ValueT, 665 unsigned InlineBuckets = 4, 666 typename KeyInfoT = DenseMapInfo<KeyT> > 667class SmallDenseMap 668 : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>, 669 KeyT, ValueT, KeyInfoT> { 670 // Lift some types from the dependent base class into this class for 671 // 
simplicity of referring to them. 672 typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT; 673 typedef typename BaseT::BucketT BucketT; 674 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>; 675 676 unsigned Small : 1; 677 unsigned NumEntries : 31; 678 unsigned NumTombstones; 679 680 struct LargeRep { 681 BucketT *Buckets; 682 unsigned NumBuckets; 683 }; 684 685 /// A "union" of an inline bucket array and the struct representing 686 /// a large bucket. This union will be discriminated by the 'Small' bit. 687 typename AlignedCharArray<BucketT[InlineBuckets], LargeRep>::union_type 688 storage; 689 690public: 691 explicit SmallDenseMap(unsigned NumInitBuckets = 0) { 692 init(NumInitBuckets); 693 } 694 695 SmallDenseMap(const SmallDenseMap &other) { 696 init(0); 697 copyFrom(other); 698 } 699 700#if LLVM_USE_RVALUE_REFERENCES 701 SmallDenseMap(SmallDenseMap &&other) { 702 init(0); 703 swap(other); 704 } 705#endif 706 707 template<typename InputIt> 708 SmallDenseMap(const InputIt &I, const InputIt &E) { 709 init(NextPowerOf2(std::distance(I, E))); 710 this->insert(I, E); 711 } 712 713 ~SmallDenseMap() { 714 this->destroyAll(); 715 deallocateBuckets(); 716 } 717 718 void swap(SmallDenseMap& RHS) { 719 unsigned TmpNumEntries = RHS.NumEntries; 720 RHS.NumEntries = NumEntries; 721 NumEntries = TmpNumEntries; 722 std::swap(NumTombstones, RHS.NumTombstones); 723 724 const KeyT EmptyKey = this->getEmptyKey(); 725 const KeyT TombstoneKey = this->getTombstoneKey(); 726 if (Small && RHS.Small) { 727 // If we're swapping inline bucket arrays, we have to cope with some of 728 // the tricky bits of DenseMap's storage system: the buckets are not 729 // fully initialized. Thus we swap every key, but we may have 730 // a one-directional move of the value. 
731 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { 732 BucketT *LHSB = &getInlineBuckets()[i], 733 *RHSB = &RHS.getInlineBuckets()[i]; 734 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) && 735 !KeyInfoT::isEqual(LHSB->first, TombstoneKey)); 736 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) && 737 !KeyInfoT::isEqual(RHSB->first, TombstoneKey)); 738 if (hasLHSValue && hasRHSValue) { 739 // Swap together if we can... 740 std::swap(*LHSB, *RHSB); 741 continue; 742 } 743 // Swap separately and handle any assymetry. 744 std::swap(LHSB->first, RHSB->first); 745 if (hasLHSValue) { 746 new (&RHSB->second) ValueT(llvm_move(LHSB->second)); 747 LHSB->second.~ValueT(); 748 } else if (hasRHSValue) { 749 new (&LHSB->second) ValueT(llvm_move(RHSB->second)); 750 RHSB->second.~ValueT(); 751 } 752 } 753 return; 754 } 755 if (!Small && !RHS.Small) { 756 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); 757 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); 758 return; 759 } 760 761 SmallDenseMap &SmallSide = Small ? *this : RHS; 762 SmallDenseMap &LargeSide = Small ? RHS : *this; 763 764 // First stash the large side's rep and move the small side across. 765 LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep()); 766 LargeSide.getLargeRep()->~LargeRep(); 767 LargeSide.Small = true; 768 // This is similar to the standard move-from-old-buckets, but the bucket 769 // count hasn't actually rotated in this case. So we have to carefully 770 // move construct the keys and values into their new locations, but there 771 // is no need to re-hash things. 
772 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { 773 BucketT *NewB = &LargeSide.getInlineBuckets()[i], 774 *OldB = &SmallSide.getInlineBuckets()[i]; 775 new (&NewB->first) KeyT(llvm_move(OldB->first)); 776 OldB->first.~KeyT(); 777 if (!KeyInfoT::isEqual(NewB->first, EmptyKey) && 778 !KeyInfoT::isEqual(NewB->first, TombstoneKey)) { 779 new (&NewB->second) ValueT(llvm_move(OldB->second)); 780 OldB->second.~ValueT(); 781 } 782 } 783 784 // The hard part of moving the small buckets across is done, just move 785 // the TmpRep into its new home. 786 SmallSide.Small = false; 787 new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep)); 788 } 789 790 SmallDenseMap& operator=(const SmallDenseMap& other) { 791 copyFrom(other); 792 return *this; 793 } 794 795#if LLVM_USE_RVALUE_REFERENCES 796 SmallDenseMap& operator=(SmallDenseMap &&other) { 797 this->destroyAll(); 798 deallocateBuckets(); 799 init(0); 800 swap(other); 801 return *this; 802 } 803#endif 804 805 void copyFrom(const SmallDenseMap& other) { 806 this->destroyAll(); 807 deallocateBuckets(); 808 Small = true; 809 if (other.getNumBuckets() > InlineBuckets) { 810 Small = false; 811 allocateBuckets(other.getNumBuckets()); 812 } 813 this->BaseT::copyFrom(other); 814 } 815 816 void init(unsigned InitBuckets) { 817 Small = true; 818 if (InitBuckets > InlineBuckets) { 819 Small = false; 820 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); 821 } 822 this->BaseT::initEmpty(); 823 } 824 825 void grow(unsigned AtLeast) { 826 if (AtLeast > InlineBuckets) 827 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast)); 828 829 if (Small) { 830 if (AtLeast <= InlineBuckets) 831 return; // Nothing to do. 832 833 // First move the inline buckets into a temporary storage. 
834 typename AlignedCharArray<BucketT[InlineBuckets]>::union_type 835 TmpStorage; 836 BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer); 837 BucketT *TmpEnd = TmpBegin; 838 839 // Loop over the buckets, moving non-empty, non-tombstones into the 840 // temporary storage. Have the loop move the TmpEnd forward as it goes. 841 const KeyT EmptyKey = this->getEmptyKey(); 842 const KeyT TombstoneKey = this->getTombstoneKey(); 843 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { 844 if (!KeyInfoT::isEqual(P->first, EmptyKey) && 845 !KeyInfoT::isEqual(P->first, TombstoneKey)) { 846 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && 847 "Too many inline buckets!"); 848 new (&TmpEnd->first) KeyT(llvm_move(P->first)); 849 new (&TmpEnd->second) ValueT(llvm_move(P->second)); 850 ++TmpEnd; 851 P->second.~ValueT(); 852 } 853 P->first.~KeyT(); 854 } 855 856 // Now make this map use the large rep, and move all the entries back 857 // into it. 858 Small = false; 859 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); 860 this->moveFromOldBuckets(TmpBegin, TmpEnd); 861 return; 862 } 863 864 LargeRep OldRep = llvm_move(*getLargeRep()); 865 getLargeRep()->~LargeRep(); 866 if (AtLeast <= InlineBuckets) { 867 Small = true; 868 } else { 869 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); 870 } 871 872 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); 873 874 // Free the old table. 875 operator delete(OldRep.Buckets); 876 } 877 878 void shrink_and_clear() { 879 unsigned OldSize = this->size(); 880 this->destroyAll(); 881 882 // Reduce the number of buckets. 
883 unsigned NewNumBuckets = 0; 884 if (OldSize) { 885 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); 886 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) 887 NewNumBuckets = 64; 888 } 889 if ((Small && NewNumBuckets <= InlineBuckets) || 890 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { 891 this->BaseT::initEmpty(); 892 return; 893 } 894 895 deallocateBuckets(); 896 init(NewNumBuckets); 897 } 898 899private: 900 unsigned getNumEntries() const { 901 return NumEntries; 902 } 903 void setNumEntries(unsigned Num) { 904 assert(Num < INT_MAX && "Cannot support more than INT_MAX entries"); 905 NumEntries = Num; 906 } 907 908 unsigned getNumTombstones() const { 909 return NumTombstones; 910 } 911 void setNumTombstones(unsigned Num) { 912 NumTombstones = Num; 913 } 914 915 const BucketT *getInlineBuckets() const { 916 assert(Small); 917 // Note that this cast does not violate aliasing rules as we assert that 918 // the memory's dynamic type is the small, inline bucket buffer, and the 919 // 'storage.buffer' static type is 'char *'. 920 return reinterpret_cast<const BucketT *>(storage.buffer); 921 } 922 BucketT *getInlineBuckets() { 923 return const_cast<BucketT *>( 924 const_cast<const SmallDenseMap *>(this)->getInlineBuckets()); 925 } 926 const LargeRep *getLargeRep() const { 927 assert(!Small); 928 // Note, same rule about aliasing as with getInlineBuckets. 929 return reinterpret_cast<const LargeRep *>(storage.buffer); 930 } 931 LargeRep *getLargeRep() { 932 return const_cast<LargeRep *>( 933 const_cast<const SmallDenseMap *>(this)->getLargeRep()); 934 } 935 936 const BucketT *getBuckets() const { 937 return Small ? getInlineBuckets() : getLargeRep()->Buckets; 938 } 939 BucketT *getBuckets() { 940 return const_cast<BucketT *>( 941 const_cast<const SmallDenseMap *>(this)->getBuckets()); 942 } 943 unsigned getNumBuckets() const { 944 return Small ? 
InlineBuckets : getLargeRep()->NumBuckets; 945 } 946 947 void deallocateBuckets() { 948 if (Small) 949 return; 950 951 operator delete(getLargeRep()->Buckets); 952 getLargeRep()->~LargeRep(); 953 } 954 955 LargeRep allocateBuckets(unsigned Num) { 956 assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); 957 LargeRep Rep = { 958 static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num 959 }; 960 return Rep; 961 } 962}; 963 964template<typename KeyT, typename ValueT, 965 typename KeyInfoT, bool IsConst> 966class DenseMapIterator { 967 typedef std::pair<KeyT, ValueT> Bucket; 968 typedef DenseMapIterator<KeyT, ValueT, 969 KeyInfoT, true> ConstIterator; 970 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>; 971public: 972 typedef ptrdiff_t difference_type; 973 typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type; 974 typedef value_type *pointer; 975 typedef value_type &reference; 976 typedef std::forward_iterator_tag iterator_category; 977private: 978 pointer Ptr, End; 979public: 980 DenseMapIterator() : Ptr(0), End(0) {} 981 982 DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) 983 : Ptr(Pos), End(E) { 984 if (!NoAdvance) AdvancePastEmptyBuckets(); 985 } 986 987 // If IsConst is true this is a converting constructor from iterator to 988 // const_iterator and the default copy constructor is used. 989 // Otherwise this is a copy constructor for iterator. 
990 DenseMapIterator(const DenseMapIterator<KeyT, ValueT, 991 KeyInfoT, false>& I) 992 : Ptr(I.Ptr), End(I.End) {} 993 994 reference operator*() const { 995 return *Ptr; 996 } 997 pointer operator->() const { 998 return Ptr; 999 } 1000 1001 bool operator==(const ConstIterator &RHS) const { 1002 return Ptr == RHS.operator->(); 1003 } 1004 bool operator!=(const ConstIterator &RHS) const { 1005 return Ptr != RHS.operator->(); 1006 } 1007 1008 inline DenseMapIterator& operator++() { // Preincrement 1009 ++Ptr; 1010 AdvancePastEmptyBuckets(); 1011 return *this; 1012 } 1013 DenseMapIterator operator++(int) { // Postincrement 1014 DenseMapIterator tmp = *this; ++*this; return tmp; 1015 } 1016 1017private: 1018 void AdvancePastEmptyBuckets() { 1019 const KeyT Empty = KeyInfoT::getEmptyKey(); 1020 const KeyT Tombstone = KeyInfoT::getTombstoneKey(); 1021 1022 while (Ptr != End && 1023 (KeyInfoT::isEqual(Ptr->first, Empty) || 1024 KeyInfoT::isEqual(Ptr->first, Tombstone))) 1025 ++Ptr; 1026 } 1027}; 1028 1029template<typename KeyT, typename ValueT, typename KeyInfoT> 1030static inline size_t 1031capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) { 1032 return X.getMemorySize(); 1033} 1034 1035} // end namespace llvm 1036 1037#endif 1038