1//===-- Support/FoldingSet.cpp - Uniquing Hash Set --------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements a hash set that can be used to remove duplication of 11// nodes in a graph. 12// 13//===----------------------------------------------------------------------===// 14 15#include "llvm/ADT/FoldingSet.h" 16#include "llvm/ADT/Hashing.h" 17#include "llvm/Support/Allocator.h" 18#include "llvm/Support/ErrorHandling.h" 19#include "llvm/Support/Host.h" 20#include "llvm/Support/MathExtras.h" 21#include <cassert> 22#include <cstring> 23using namespace llvm; 24 25//===----------------------------------------------------------------------===// 26// FoldingSetNodeIDRef Implementation 27 28/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef, 29/// used to lookup the node in the FoldingSetImpl. 30unsigned FoldingSetNodeIDRef::ComputeHash() const { 31 return static_cast<unsigned>(hash_combine_range(Data, Data+Size)); 32} 33 34bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { 35 if (Size != RHS.Size) return false; 36 return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; 37} 38 39/// Used to compare the "ordering" of two nodes as defined by the 40/// profiled bits and their ordering defined by memcmp(). 41bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { 42 if (Size != RHS.Size) 43 return Size < RHS.Size; 44 return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; 45} 46 47//===----------------------------------------------------------------------===// 48// FoldingSetNodeID Implementation 49 50/// Add* - Add various data types to Bit data. 
/// AddPointer - Hash a pointer value into the profile words.
void FoldingSetNodeID::AddPointer(const void *Ptr) {
  // Note: this adds pointers to the hash using sizes and endianness that
  // depend on the host. It doesn't matter, however, because hashing on
  // pointer values is inherently unstable. Nothing should depend on the
  // ordering of nodes in the folding set.
  Bits.append(reinterpret_cast<unsigned *>(&Ptr),
              reinterpret_cast<unsigned *>(&Ptr+1));
}

/// AddInteger - Profile a signed int; it is stored as a single unsigned
/// word (implicit conversion), so equal bit patterns profile identically.
void FoldingSetNodeID::AddInteger(signed I) {
  Bits.push_back(I);
}

/// AddInteger - Profile an unsigned int as one profile word.
void FoldingSetNodeID::AddInteger(unsigned I) {
  Bits.push_back(I);
}

/// AddInteger - Profile a long via its unsigned representation.
void FoldingSetNodeID::AddInteger(long I) {
  AddInteger((unsigned long)I);
}

/// AddInteger - Profile an unsigned long by dispatching to the overload
/// matching the host's sizeof(long).
void FoldingSetNodeID::AddInteger(unsigned long I) {
  if (sizeof(long) == sizeof(int))
    AddInteger(unsigned(I));
  else if (sizeof(long) == sizeof(long long)) {
    AddInteger((unsigned long long)I);
  } else {
    llvm_unreachable("unexpected sizeof(long)");
  }
}

/// AddInteger - Profile a long long via its unsigned representation.
void FoldingSetNodeID::AddInteger(long long I) {
  AddInteger((unsigned long long)I);
}

/// AddInteger - Profile a 64-bit value. Values that fit in 32 bits emit a
/// single word, so they profile identically to the 32-bit overloads.
void FoldingSetNodeID::AddInteger(unsigned long long I) {
  AddInteger(unsigned(I));
  // Only emit the high word when it carries information.
  if ((uint64_t)(unsigned)I != I)
    Bits.push_back(unsigned(I >> 32));
}

/// AddString - Profile a string: its length followed by its bytes packed
/// four-per-word.  The packing matches the host's in-memory word layout so
/// the aligned and unaligned paths below produce identical profiles.
void FoldingSetNodeID::AddString(StringRef String) {
  unsigned Size = String.size();
  // Length first, so "ab"+"c" and "a"+"bc" profile differently.
  Bits.push_back(Size);
  if (!Size) return;

  unsigned Units = Size / 4;
  unsigned Pos = 0;
  const unsigned *Base = (const unsigned*) String.data();

  // If the string is aligned do a bulk transfer.
  if (!((intptr_t)Base & 3)) {
    Bits.append(Base, Base + Units);
    // Deliberately overshoot Size so the leftover switch below (which keys
    // on Pos - Size) handles both paths uniformly.
    Pos = (Units + 1) * 4;
  } else {
    // Otherwise do it the hard way.
    // To be compatible with above bulk transfer, we need to take endianness
    // into account.
    static_assert(sys::IsBigEndianHost || sys::IsLittleEndianHost,
                  "Unexpected host endianness");
    if (sys::IsBigEndianHost) {
      for (Pos += 4; Pos <= Size; Pos += 4) {
        unsigned V = ((unsigned char)String[Pos - 4] << 24) |
                     ((unsigned char)String[Pos - 3] << 16) |
                     ((unsigned char)String[Pos - 2] << 8) |
                     (unsigned char)String[Pos - 1];
        Bits.push_back(V);
      }
    } else {  // Little-endian host
      for (Pos += 4; Pos <= Size; Pos += 4) {
        unsigned V = ((unsigned char)String[Pos - 1] << 24) |
                     ((unsigned char)String[Pos - 2] << 16) |
                     ((unsigned char)String[Pos - 3] << 8) |
                     (unsigned char)String[Pos - 4];
        Bits.push_back(V);
      }
    }
  }

  // With the leftover bits.
  unsigned V = 0;
  // Pos will have overshot size by 4 - #bytes left over.
  // No need to take endianness into account here - this is always executed.
  switch (Pos - Size) {
  case 1: V = (V << 8) | (unsigned char)String[Size - 3]; // Fall thru.
  case 2: V = (V << 8) | (unsigned char)String[Size - 2]; // Fall thru.
  case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break;
  default: return; // Nothing left.
  }

  Bits.push_back(V);
}

// AddNodeID - Adds the Bit data of another ID to *this.
void FoldingSetNodeID::AddNodeID(const FoldingSetNodeID &ID) {
  Bits.append(ID.Bits.begin(), ID.Bits.end());
}

/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to
/// lookup the node in the FoldingSetImpl.
unsigned FoldingSetNodeID::ComputeHash() const {
  // Delegate to the lightweight reference view over the same words.
  return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash();
}

/// operator== - Used to compare two nodes to each other.
///
bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS) const {
  return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size());
}

/// operator== - Used to compare two nodes to each other.
157/// 158bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { 159 return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; 160} 161 162/// Used to compare the "ordering" of two nodes as defined by the 163/// profiled bits and their ordering defined by memcmp(). 164bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS) const { 165 return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); 166} 167 168bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { 169 return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; 170} 171 172/// Intern - Copy this node's data to a memory region allocated from the 173/// given allocator and return a FoldingSetNodeIDRef describing the 174/// interned data. 175FoldingSetNodeIDRef 176FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const { 177 unsigned *New = Allocator.Allocate<unsigned>(Bits.size()); 178 std::uninitialized_copy(Bits.begin(), Bits.end(), New); 179 return FoldingSetNodeIDRef(New, Bits.size()); 180} 181 182//===----------------------------------------------------------------------===// 183/// Helper functions for FoldingSetImpl. 184 185/// GetNextPtr - In order to save space, each bucket is a 186/// singly-linked-list. In order to make deletion more efficient, we make 187/// the list circular, so we can delete a node without computing its hash. 188/// The problem with this is that the start of the hash buckets are not 189/// Nodes. If NextInBucketPtr is a bucket pointer, this method returns null: 190/// use GetBucketPtr when this happens. 191static FoldingSetImpl::Node *GetNextPtr(void *NextInBucketPtr) { 192 // The low bit is set if this is the pointer back to the bucket. 193 if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1) 194 return nullptr; 195 196 return static_cast<FoldingSetImpl::Node*>(NextInBucketPtr); 197} 198 199 200/// testing. 
201static void **GetBucketPtr(void *NextInBucketPtr) { 202 intptr_t Ptr = reinterpret_cast<intptr_t>(NextInBucketPtr); 203 assert((Ptr & 1) && "Not a bucket pointer"); 204 return reinterpret_cast<void**>(Ptr & ~intptr_t(1)); 205} 206 207/// GetBucketFor - Hash the specified node ID and return the hash bucket for 208/// the specified ID. 209static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) { 210 // NumBuckets is always a power of 2. 211 unsigned BucketNum = Hash & (NumBuckets-1); 212 return Buckets + BucketNum; 213} 214 215/// AllocateBuckets - Allocated initialized bucket memory. 216static void **AllocateBuckets(unsigned NumBuckets) { 217 void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*))); 218 // Set the very last bucket to be a non-null "pointer". 219 Buckets[NumBuckets] = reinterpret_cast<void*>(-1); 220 return Buckets; 221} 222 223//===----------------------------------------------------------------------===// 224// FoldingSetImpl Implementation 225 226void FoldingSetImpl::anchor() {} 227 228FoldingSetImpl::FoldingSetImpl(unsigned Log2InitSize) { 229 assert(5 < Log2InitSize && Log2InitSize < 32 && 230 "Initial hash table size out of range"); 231 NumBuckets = 1 << Log2InitSize; 232 Buckets = AllocateBuckets(NumBuckets); 233 NumNodes = 0; 234} 235 236FoldingSetImpl::FoldingSetImpl(FoldingSetImpl &&Arg) 237 : Buckets(Arg.Buckets), NumBuckets(Arg.NumBuckets), NumNodes(Arg.NumNodes) { 238 Arg.Buckets = nullptr; 239 Arg.NumBuckets = 0; 240 Arg.NumNodes = 0; 241} 242 243FoldingSetImpl &FoldingSetImpl::operator=(FoldingSetImpl &&RHS) { 244 free(Buckets); // This may be null if the set is in a moved-from state. 
245 Buckets = RHS.Buckets; 246 NumBuckets = RHS.NumBuckets; 247 NumNodes = RHS.NumNodes; 248 RHS.Buckets = nullptr; 249 RHS.NumBuckets = 0; 250 RHS.NumNodes = 0; 251 return *this; 252} 253 254FoldingSetImpl::~FoldingSetImpl() { 255 free(Buckets); 256} 257 258void FoldingSetImpl::clear() { 259 // Set all but the last bucket to null pointers. 260 memset(Buckets, 0, NumBuckets*sizeof(void*)); 261 262 // Set the very last bucket to be a non-null "pointer". 263 Buckets[NumBuckets] = reinterpret_cast<void*>(-1); 264 265 // Reset the node count to zero. 266 NumNodes = 0; 267} 268 269void FoldingSetImpl::GrowBucketCount(unsigned NewBucketCount) { 270 assert((NewBucketCount > NumBuckets) && "Can't shrink a folding set with GrowBucketCount"); 271 assert(isPowerOf2_32(NewBucketCount) && "Bad bucket count!"); 272 void **OldBuckets = Buckets; 273 unsigned OldNumBuckets = NumBuckets; 274 NumBuckets = NewBucketCount; 275 276 // Clear out new buckets. 277 Buckets = AllocateBuckets(NumBuckets); 278 NumNodes = 0; 279 280 // Walk the old buckets, rehashing nodes into their new place. 281 FoldingSetNodeID TempID; 282 for (unsigned i = 0; i != OldNumBuckets; ++i) { 283 void *Probe = OldBuckets[i]; 284 if (!Probe) continue; 285 while (Node *NodeInBucket = GetNextPtr(Probe)) { 286 // Figure out the next link, remove NodeInBucket from the old link. 287 Probe = NodeInBucket->getNextInBucket(); 288 NodeInBucket->SetNextInBucket(nullptr); 289 290 // Insert the node into the new bucket, after recomputing the hash. 291 InsertNode(NodeInBucket, 292 GetBucketFor(ComputeNodeHash(NodeInBucket, TempID), 293 Buckets, NumBuckets)); 294 TempID.clear(); 295 } 296 } 297 298 free(OldBuckets); 299} 300 301/// GrowHashTable - Double the size of the hash table and rehash everything. 
///
void FoldingSetImpl::GrowHashTable() {
  GrowBucketCount(NumBuckets * 2);
}

/// reserve - Grow the bucket array, if needed, so that EltCount nodes can be
/// inserted without triggering a rehash.
void FoldingSetImpl::reserve(unsigned EltCount) {
  // This will give us somewhere between EltCount / 2 and
  // EltCount buckets.  This puts us in the load factor
  // range of 1.0 - 2.0.
  if(EltCount < capacity())
    return;
  GrowBucketCount(PowerOf2Floor(EltCount));
}

/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
FoldingSetImpl::Node
*FoldingSetImpl::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                     void *&InsertPos) {
  unsigned IDHash = ID.ComputeHash();
  void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets);
  void *Probe = *Bucket;

  InsertPos = nullptr;

  // Walk the bucket's circular list; GetNextPtr returns null when Probe is
  // the tagged pointer back to the bucket head.
  FoldingSetNodeID TempID;
  while (Node *NodeInBucket = GetNextPtr(Probe)) {
    if (NodeEquals(NodeInBucket, ID, IDHash, TempID))
      return NodeInBucket;
    // TempID is scratch space for NodeEquals; reset it between candidates.
    TempID.clear();

    Probe = NodeInBucket->getNextInBucket();
  }

  // Didn't find the node, return null with the bucket as the InsertPos.
  InsertPos = Bucket;
  return nullptr;
}

/// InsertNode - Insert the specified node into the folding set, knowing that it
/// is not already in the map. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void FoldingSetImpl::InsertNode(Node *N, void *InsertPos) {
  assert(!N->getNextInBucket());
  // Do we need to grow the hashtable?
  if (NumNodes+1 > capacity()) {
    GrowHashTable();
    // Growing rehashed every bucket, so the caller's InsertPos is stale;
    // recompute it for the new table.
    FoldingSetNodeID TempID;
    InsertPos = GetBucketFor(ComputeNodeHash(N, TempID), Buckets, NumBuckets);
  }

  ++NumNodes;

  /// The insert position is actually a bucket pointer.
  void **Bucket = static_cast<void**>(InsertPos);

  void *Next = *Bucket;

  // If this is the first insertion into this bucket, its next pointer will be
  // null.  Pretend as if it pointed to itself, setting the low bit to indicate
  // that it is a pointer to the bucket.
  if (!Next)
    Next = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(Bucket)|1);

  // Set the node's next pointer, and make the bucket point to the node.
  N->SetNextInBucket(Next);
  *Bucket = N;
}

/// RemoveNode - Remove a node from the folding set, returning true if one was
/// removed or false if the node was not in the folding set.
bool FoldingSetImpl::RemoveNode(Node *N) {
  // Because each bucket is a circular list, we don't need to compute N's hash
  // to remove it.
  void *Ptr = N->getNextInBucket();
  if (!Ptr) return false;  // Not in folding set.

  --NumNodes;
  N->SetNextInBucket(nullptr);

  // Remember what N originally pointed to, either a bucket or another node.
  void *NodeNextPtr = Ptr;

  // Chase around the list until we find the node (or bucket) which points to N.
  while (true) {
    if (Node *NodeInBucket = GetNextPtr(Ptr)) {
      // Advance pointer.
      Ptr = NodeInBucket->getNextInBucket();

      // We found a node that points to N, change it to point to N's next node,
      // removing N from the list.
      if (Ptr == N) {
        NodeInBucket->SetNextInBucket(NodeNextPtr);
        return true;
      }
    } else {
      // Ptr is the tagged pointer back to the bucket head.
      void **Bucket = GetBucketPtr(Ptr);
      Ptr = *Bucket;

      // If we found that the bucket points to N, update the bucket to point to
      // whatever is next.
      if (Ptr == N) {
        *Bucket = NodeNextPtr;
        return true;
      }
    }
  }
}

/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it.  Otherwise, insert 'N' and
/// return it instead.
FoldingSetImpl::Node *FoldingSetImpl::GetOrInsertNode(FoldingSetImpl::Node *N) {
  // Profile N, then either return the existing equal node or insert N at
  // the position the failed lookup produced.
  FoldingSetNodeID ID;
  GetNodeProfile(N, ID);
  void *IP;
  if (Node *E = FindNodeOrInsertPos(ID, IP))
    return E;
  InsertNode(N, IP);
  return N;
}

//===----------------------------------------------------------------------===//
// FoldingSetIteratorImpl Implementation

FoldingSetIteratorImpl::FoldingSetIteratorImpl(void **Bucket) {
  // Skip to the first non-null non-self-cycle bucket.
  // The (void*)-1 sentinel stored past the last bucket terminates the scan;
  // GetNextPtr filters out tagged (non-node) entries.
  while (*Bucket != reinterpret_cast<void*>(-1) &&
         (!*Bucket || !GetNextPtr(*Bucket)))
    ++Bucket;

  NodePtr = static_cast<FoldingSetNode*>(*Bucket);
}

void FoldingSetIteratorImpl::advance() {
  // If there is another link within this bucket, go to it.
  void *Probe = NodePtr->getNextInBucket();

  if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe))
    NodePtr = NextNodeInBucket;
  else {
    // Otherwise, this is the last link in this bucket.
    // Probe is the tagged pointer back to the bucket head.
    void **Bucket = GetBucketPtr(Probe);

    // Skip to the next non-null non-self-cycle bucket.
    do {
      ++Bucket;
    } while (*Bucket != reinterpret_cast<void*>(-1) &&
             (!*Bucket || !GetNextPtr(*Bucket)));

    NodePtr = static_cast<FoldingSetNode*>(*Bucket);
  }
}

//===----------------------------------------------------------------------===//
// FoldingSetBucketIteratorImpl Implementation

FoldingSetBucketIteratorImpl::FoldingSetBucketIteratorImpl(void **Bucket) {
  // An empty bucket (or one holding only a tagged pointer) yields the
  // bucket address itself as the end marker; otherwise start at the first
  // node in the bucket.
  Ptr = (!*Bucket || !GetNextPtr(*Bucket)) ? (void*) Bucket : *Bucket;
}