// DenseMap.h — LLVM revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <utility>

namespace llvm {

template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT>,
         bool IsConst = false>
class DenseMapIterator;

/// DenseMapBase - Common implementation shared by DenseMap and SmallDenseMap.
/// Uses CRTP: DerivedT supplies the bucket storage and the entry/tombstone
/// bookkeeping via the private accessors at the bottom of this class.
///
/// Buckets hold key/value pairs directly.  A bucket is "empty" when its key
/// equals KeyInfoT::getEmptyKey() and a "tombstone" (erased slot) when it
/// equals KeyInfoT::getTombstoneKey(); only other keys have a constructed
/// value.  Collisions are resolved by quadratic probing (see LookupBucketFor).
template<typename DerivedT,
         typename KeyT, typename ValueT, typename KeyInfoT>
class DenseMapBase {
protected:
  typedef std::pair<KeyT, ValueT> BucketT;

public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), true);
  }
  inline const_iterator begin() const {
    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  }

  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it has at least Size buckets. Does not shrink
  void resize(size_t Size) {
    if (Size > getNumBuckets())
      grow(Size);
  }

  /// clear - Remove all entries.  May shrink a sparsely-used large table;
  /// otherwise resets every bucket to the empty key in place.
  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
          // Live entry: destroy the value before resetting the key.
          P->second.~ValueT();
          decrementNumEntries();
        }
        P->first = EmptyKey;
      }
    }
    assert(getNumEntries() == 0 && "Node count imbalance!");
    setNumTombstones(0);
  }

  /// count - Return true if the specified key is in the map.
  bool count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->second;
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first),
                                 std::move(KV.second),
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  /// erase - Remove the entry for \p Val if present.  Returns true if an
  /// entry was removed.  The slot becomes a tombstone, not an empty bucket,
  /// so probe chains through it remain intact.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  /// FindAndConstruct - Return the bucket for \p Key, default-constructing
  /// and inserting a value if the key was not present.
  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() {}

  /// destroyAll - Run destructors on every constructed key and (for live
  /// entries) value.  Does not free the bucket memory; the derived class
  /// owns that.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
          !KeyInfoT::isEqual(P->first, TombstoneKey))
        P->second.~ValueT();
      P->first.~KeyT();
    }

#ifndef NDEBUG
    // Poison the freed storage so use-after-destroy is easier to spot.
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  /// initEmpty - Construct the empty key into every bucket and zero the
  /// entry/tombstone counts.  Values are left unconstructed.
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->first) KeyT(EmptyKey);
  }

  /// moveFromOldBuckets - Re-hash every live entry from the given old bucket
  /// range into this (freshly sized) table, destroying the old keys/values
  /// as it goes.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
          !KeyInfoT::isEqual(B->first, TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->first, DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->first = std::move(B->first);
        new (&DestBucket->second) ValueT(std::move(B->second));
        incrementNumEntries();

        // Free the value.
        B->second.~ValueT();
      }
      B->first.~KeyT();
    }

#ifndef NDEBUG
    if (OldBucketsBegin != OldBucketsEnd)
      memset((void*)OldBucketsBegin, 0x5a,
             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
#endif
  }

  /// copyFrom - Copy the buckets of \p other into this map's (already
  /// allocated, equally sized) bucket array.  POD key/value types are copied
  /// with a single memcpy; otherwise each key is copy-constructed and values
  /// are constructed only for live entries.
  template <typename OtherBaseT>
  void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
        if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
          new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
      }
  }

  // NOTE(review): this swap appears uncompilable if ever instantiated —
  // getNumEntries()/getNumTombstones() return unsigned by value, and
  // std::swap cannot bind rvalues.  It is shadowed by the swap() members of
  // DenseMap and SmallDenseMap below, so it is presumably never instantiated;
  // confirm and consider removing.
  void swap(DenseMapBase& RHS) {
    std::swap(getNumEntries(), RHS.getNumEntries());
    std::swap(getNumTombstones(), RHS.getNumTombstones());
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  // CRTP forwarders: the derived class owns the actual storage and counters.
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  // InsertIntoBucket overloads: copy, copy-key/move-value, and move-both.
  // Each first ensures capacity via InsertIntoBucketImpl, then writes the
  // key (assignment over the empty/tombstone key) and placement-constructs
  // the value.
  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(Value);
    return TheBucket;
  }

  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = std::move(Key);
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The later case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (NewNumEntries*4 >= NumBuckets*3) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
      // Same bucket count: re-hashing flushes the tombstones.
      this->grow(NumBuckets);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->first, EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = 0;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = 0;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking is a cheap modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

/// DenseMap - The standard heap-allocated dense hash map.  Storage is a
/// single operator-new'd array of buckets; NumBuckets is always zero or a
/// power of two.
template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class DenseMap
    : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
                          KeyT, ValueT, KeyInfoT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  explicit DenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    // Start empty, then steal other's storage via swap.
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  /// copyFrom - Replace this map's contents with a copy of \p other,
  /// reallocating to match its bucket count.
  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      // Zero buckets: nothing allocated, counts are simply zeroed.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    // New size: at least 64, and a power of two >= AtLeast.
    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  /// allocateBuckets - Allocate raw storage for \p Num buckets (no
  /// constructors run).  Returns false when Num is zero, in which case
  /// Buckets is null and nothing was allocated.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = 0;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

/// SmallDenseMap - A DenseMap variant with a small-size inline buffer of
/// InlineBuckets buckets.  While the map fits inline ("Small" mode) no heap
/// allocation happens; past that it switches to a heap-allocated LargeRep.
template<typename KeyT, typename ValueT,
         unsigned InlineBuckets = 4,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class SmallDenseMap
    : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
                          KeyT, ValueT, KeyInfoT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;

  unsigned Small : 1;        // Discriminates the storage union below.
  unsigned NumEntries : 31;  // Entry count; capped at INT_MAX (see setter).
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any assymetry.
        std::swap(LHSB->first, RHSB->first);
        if (hasLHSValue) {
          new (&RHSB->second) ValueT(std::move(LHSB->second));
          LHSB->second.~ValueT();
        } else if (hasRHSValue) {
          new (&LHSB->second) ValueT(std::move(RHSB->second));
          RHSB->second.~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both large: just exchange the heap reps.
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    // Mixed small/large swap.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      new (&NewB->first) KeyT(std::move(OldB->first));
      OldB->first.~KeyT();
      if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
          !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
        new (&NewB->second) ValueT(std::move(OldB->second));
        OldB->second.~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  /// copyFrom - Replace this map's contents with a copy of \p other,
  /// matching its representation (inline vs. heap).
  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
            !KeyInfoT::isEqual(P->first, TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          new (&TmpEnd->first) KeyT(std::move(P->first));
          new (&TmpEnd->second) ValueT(std::move(P->second));
          ++TmpEnd;
          P->second.~ValueT();
        }
        P->first.~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    // NumEntries is a 31-bit field, so reject counts that would overflow it.
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

/// DenseMapIterator - Forward iterator over the live entries of a DenseMap.
/// Skips empty and tombstone buckets; the IsConst parameter selects between
/// iterator and const_iterator.
template<typename KeyT, typename ValueT,
         typename KeyInfoT, bool IsConst>
class DenseMapIterator {
  typedef std::pair<KeyT, ValueT> Bucket;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
  value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(0), End(0) {}

  // NoAdvance=true positions the iterator exactly at Pos (used when Pos is
  // known to be a live bucket or the end); otherwise skip forward past any
  // empty/tombstone buckets.
  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
    : Ptr(Pos), End(E) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // If IsConst is true this is a converting constructor from iterator to
  // const_iterator and the default copy constructor is used.
  // Otherwise this is a copy constructor for iterator.
  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
                                          KeyInfoT, false>& I)
    : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.operator->();
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.operator->();
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End &&
           (KeyInfoT::isEqual(Ptr->first, Empty) ||
            KeyInfoT::isEqual(Ptr->first, Tombstone)))
      ++Ptr;
  }
};

/// capacity_in_bytes - Report the raw bucket-array footprint of a DenseMap.
template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif