MemoryDependenceAnalysis.cpp revision 1e8de49fe729db17f267a35c89eb6b4ae90e834f
1//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements an analysis that determines, for a given memory 11// operation, what preceding memory operations it depends on. It builds on 12// alias analysis information, and tries to provide a lazy, caching interface to 13// a common kind of alias information query. 14// 15//===----------------------------------------------------------------------===// 16 17#define DEBUG_TYPE "memdep" 18#include "llvm/Analysis/MemoryDependenceAnalysis.h" 19#include "llvm/Instructions.h" 20#include "llvm/IntrinsicInst.h" 21#include "llvm/Function.h" 22#include "llvm/Analysis/AliasAnalysis.h" 23#include "llvm/Analysis/Dominators.h" 24#include "llvm/Analysis/InstructionSimplify.h" 25#include "llvm/Analysis/MemoryBuiltins.h" 26#include "llvm/ADT/Statistic.h" 27#include "llvm/ADT/STLExtras.h" 28#include "llvm/Support/PredIteratorCache.h" 29#include "llvm/Support/Debug.h" 30using namespace llvm; 31 32STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses"); 33STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses"); 34STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses"); 35 36STATISTIC(NumCacheNonLocalPtr, 37 "Number of fully cached non-local ptr responses"); 38STATISTIC(NumCacheDirtyNonLocalPtr, 39 "Number of cached, but dirty, non-local ptr responses"); 40STATISTIC(NumUncacheNonLocalPtr, 41 "Number of uncached non-local ptr responses"); 42STATISTIC(NumCacheCompleteNonLocalPtr, 43 "Number of block queries that were completely cached"); 44 45char MemoryDependenceAnalysis::ID = 0; 46 47// Register this pass... 48static RegisterPass<MemoryDependenceAnalysis> X("memdep", 49 "Memory Dependence Analysis", false, true); 50 51MemoryDependenceAnalysis::MemoryDependenceAnalysis() 52: FunctionPass(&ID), PredCache(0) { 53} 54MemoryDependenceAnalysis::~MemoryDependenceAnalysis() { 55} 56 57/// Clean up memory in between runs 58void MemoryDependenceAnalysis::releaseMemory() { 59 LocalDeps.clear(); 60 NonLocalDeps.clear(); 61 NonLocalPointerDeps.clear(); 62 ReverseLocalDeps.clear(); 63 ReverseNonLocalDeps.clear(); 64 ReverseNonLocalPtrDeps.clear(); 65 PredCache->clear(); 66} 67 68 69 70/// getAnalysisUsage - Does not modify anything. It uses Alias Analysis. 71/// 72void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 73 AU.setPreservesAll(); 74 AU.addRequiredTransitive<AliasAnalysis>(); 75} 76 77bool MemoryDependenceAnalysis::runOnFunction(Function &) { 78 AA = &getAnalysis<AliasAnalysis>(); 79 if (PredCache == 0) 80 PredCache.reset(new PredIteratorCache()); 81 return false; 82} 83 84/// RemoveFromReverseMap - This is a helper function that removes Val from 85/// 'Inst's set in ReverseMap. If the set becomes empty, remove Inst's entry. 86template <typename KeyTy> 87static void RemoveFromReverseMap(DenseMap<Instruction*, 88 SmallPtrSet<KeyTy, 4> > &ReverseMap, 89 Instruction *Inst, KeyTy Val) { 90 typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator 91 InstIt = ReverseMap.find(Inst); 92 assert(InstIt != ReverseMap.end() && "Reverse map out of sync?"); 93 bool Found = InstIt->second.erase(Val); 94 assert(Found && "Invalid reverse map!"); Found=Found; 95 if (InstIt->second.empty()) 96 ReverseMap.erase(InstIt); 97} 98 99 100/// getCallSiteDependencyFrom - Private helper for finding the local 101/// dependencies of a call site. 102MemDepResult MemoryDependenceAnalysis:: 103getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall, 104 BasicBlock::iterator ScanIt, BasicBlock *BB) { 105 // Walk backwards through the block, looking for dependencies 106 while (ScanIt != BB->begin()) { 107 Instruction *Inst = --ScanIt; 108 109 // If this inst is a memory op, get the pointer it accessed 110 Value *Pointer = 0; 111 uint64_t PointerSize = 0; 112 if (StoreInst *S = dyn_cast<StoreInst>(Inst)) { 113 Pointer = S->getPointerOperand(); 114 PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType()); 115 } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) { 116 Pointer = V->getOperand(0); 117 PointerSize = AA->getTypeStoreSize(V->getType()); 118 } else if (isFreeCall(Inst)) { 119 Pointer = Inst->getOperand(1); 120 // calls to free() erase the entire structure 121 PointerSize = ~0ULL; 122 } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) { 123 // Debug intrinsics don't cause dependences. 124 if (isa<DbgInfoIntrinsic>(Inst)) continue; 125 CallSite InstCS = CallSite::get(Inst); 126 // If these two calls do not interfere, look past it. 127 switch (AA->getModRefInfo(CS, InstCS)) { 128 case AliasAnalysis::NoModRef: 129 // If the two calls don't interact (e.g. InstCS is readnone) keep 130 // scanning. 131 continue; 132 case AliasAnalysis::Ref: 133 // If the two calls read the same memory locations and CS is a readonly 134 // function, then we have two cases: 1) the calls may not interfere with 135 // each other at all. 2) the calls may produce the same value. In case 136 // #1 we want to ignore the values, in case #2, we want to return Inst 137 // as a Def dependence. This allows us to CSE in cases like: 138 // X = strlen(P); 139 // memchr(...); 140 // Y = strlen(P); // Y = X 141 if (isReadOnlyCall) { 142 if (CS.getCalledFunction() != 0 && 143 CS.getCalledFunction() == InstCS.getCalledFunction()) 144 return MemDepResult::getDef(Inst); 145 // Ignore unrelated read/read call dependences. 146 continue; 147 } 148 // FALL THROUGH 149 default: 150 return MemDepResult::getClobber(Inst); 151 } 152 } else { 153 // Non-memory instruction. 154 continue; 155 } 156 157 if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef) 158 return MemDepResult::getClobber(Inst); 159 } 160 161 // No dependence found. If this is the entry block of the function, it is a 162 // clobber, otherwise it is non-local. 163 if (BB != &BB->getParent()->getEntryBlock()) 164 return MemDepResult::getNonLocal(); 165 return MemDepResult::getClobber(ScanIt); 166} 167 168/// getPointerDependencyFrom - Return the instruction on which a memory 169/// location depends. If isLoad is true, this routine ignore may-aliases with 170/// read-only operations. 171MemDepResult MemoryDependenceAnalysis:: 172getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad, 173 BasicBlock::iterator ScanIt, BasicBlock *BB) { 174 175 Value *InvariantTag = 0; 176 177 // Walk backwards through the basic block, looking for dependencies. 178 while (ScanIt != BB->begin()) { 179 Instruction *Inst = --ScanIt; 180 181 // If we're in an invariant region, no dependencies can be found before 182 // we pass an invariant-begin marker. 183 if (InvariantTag == Inst) { 184 InvariantTag = 0; 185 continue; 186 } 187 188 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 189 // If we pass an invariant-end marker, then we've just entered an 190 // invariant region and can start ignoring dependencies. 191 if (II->getIntrinsicID() == Intrinsic::invariant_end) { 192 uint64_t InvariantSize = ~0ULL; 193 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(2))) 194 InvariantSize = CI->getZExtValue(); 195 196 AliasAnalysis::AliasResult R = 197 AA->alias(II->getOperand(3), InvariantSize, MemPtr, MemSize); 198 if (R == AliasAnalysis::MustAlias) { 199 InvariantTag = II->getOperand(1); 200 continue; 201 } 202 203 // If we reach a lifetime begin or end marker, then the query ends here 204 // because the value is undefined. 205 } else if (II->getIntrinsicID() == Intrinsic::lifetime_start || 206 II->getIntrinsicID() == Intrinsic::lifetime_end) { 207 uint64_t InvariantSize = ~0ULL; 208 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(1))) 209 InvariantSize = CI->getZExtValue(); 210 211 AliasAnalysis::AliasResult R = 212 AA->alias(II->getOperand(2), InvariantSize, MemPtr, MemSize); 213 if (R == AliasAnalysis::MustAlias) 214 return MemDepResult::getDef(II); 215 } 216 } 217 218 // If we're querying on a load and we're in an invariant region, we're done 219 // at this point. Nothing a load depends on can live in an invariant region. 220 if (isLoad && InvariantTag) continue; 221 222 // Debug intrinsics don't cause dependences. 223 if (isa<DbgInfoIntrinsic>(Inst)) continue; 224 225 // Values depend on loads if the pointers are must aliased. This means that 226 // a load depends on another must aliased load from the same value. 227 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 228 Value *Pointer = LI->getPointerOperand(); 229 uint64_t PointerSize = AA->getTypeStoreSize(LI->getType()); 230 231 // If we found a pointer, check if it could be the same as our pointer. 232 AliasAnalysis::AliasResult R = 233 AA->alias(Pointer, PointerSize, MemPtr, MemSize); 234 if (R == AliasAnalysis::NoAlias) 235 continue; 236 237 // May-alias loads don't depend on each other without a dependence. 238 if (isLoad && R == AliasAnalysis::MayAlias) 239 continue; 240 // Stores depend on may and must aliased loads, loads depend on must-alias 241 // loads. 242 return MemDepResult::getDef(Inst); 243 } 244 245 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 246 // There can't be stores to the value we care about inside an 247 // invariant region. 248 if (InvariantTag) continue; 249 250 // If alias analysis can tell that this store is guaranteed to not modify 251 // the query pointer, ignore it. Use getModRefInfo to handle cases where 252 // the query pointer points to constant memory etc. 253 if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef) 254 continue; 255 256 // Ok, this store might clobber the query pointer. Check to see if it is 257 // a must alias: in this case, we want to return this as a def. 258 Value *Pointer = SI->getPointerOperand(); 259 uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType()); 260 261 // If we found a pointer, check if it could be the same as our pointer. 262 AliasAnalysis::AliasResult R = 263 AA->alias(Pointer, PointerSize, MemPtr, MemSize); 264 265 if (R == AliasAnalysis::NoAlias) 266 continue; 267 if (R == AliasAnalysis::MayAlias) 268 return MemDepResult::getClobber(Inst); 269 return MemDepResult::getDef(Inst); 270 } 271 272 // If this is an allocation, and if we know that the accessed pointer is to 273 // the allocation, return Def. This means that there is no dependence and 274 // the access can be optimized based on that. For example, a load could 275 // turn into undef. 276 // Note: Only determine this to be a malloc if Inst is the malloc call, not 277 // a subsequent bitcast of the malloc call result. There can be stores to 278 // the malloced memory between the malloc call and its bitcast uses, and we 279 // need to continue scanning until the malloc call. 280 if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) { 281 Value *AccessPtr = MemPtr->getUnderlyingObject(); 282 283 if (AccessPtr == Inst || 284 AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias) 285 return MemDepResult::getDef(Inst); 286 continue; 287 } 288 289 // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer. 290 switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) { 291 case AliasAnalysis::NoModRef: 292 // If the call has no effect on the queried pointer, just ignore it. 293 continue; 294 case AliasAnalysis::Mod: 295 // If we're in an invariant region, we can ignore calls that ONLY 296 // modify the pointer. 297 if (InvariantTag) continue; 298 return MemDepResult::getClobber(Inst); 299 case AliasAnalysis::Ref: 300 // If the call is known to never store to the pointer, and if this is a 301 // load query, we can safely ignore it (scan past it). 302 if (isLoad) 303 continue; 304 default: 305 // Otherwise, there is a potential dependence. Return a clobber. 306 return MemDepResult::getClobber(Inst); 307 } 308 } 309 310 // No dependence found. If this is the entry block of the function, it is a 311 // clobber, otherwise it is non-local. 312 if (BB != &BB->getParent()->getEntryBlock()) 313 return MemDepResult::getNonLocal(); 314 return MemDepResult::getClobber(ScanIt); 315} 316 317/// getDependency - Return the instruction on which a memory operation 318/// depends. 319MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) { 320 Instruction *ScanPos = QueryInst; 321 322 // Check for a cached result 323 MemDepResult &LocalCache = LocalDeps[QueryInst]; 324 325 // If the cached entry is non-dirty, just return it. Note that this depends 326 // on MemDepResult's default constructing to 'dirty'. 327 if (!LocalCache.isDirty()) 328 return LocalCache; 329 330 // Otherwise, if we have a dirty entry, we know we can start the scan at that 331 // instruction, which may save us some work. 332 if (Instruction *Inst = LocalCache.getInst()) { 333 ScanPos = Inst; 334 335 RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst); 336 } 337 338 BasicBlock *QueryParent = QueryInst->getParent(); 339 340 Value *MemPtr = 0; 341 uint64_t MemSize = 0; 342 343 // Do the scan. 344 if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) { 345 // No dependence found. If this is the entry block of the function, it is a 346 // clobber, otherwise it is non-local. 347 if (QueryParent != &QueryParent->getParent()->getEntryBlock()) 348 LocalCache = MemDepResult::getNonLocal(); 349 else 350 LocalCache = MemDepResult::getClobber(QueryInst); 351 } else if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) { 352 // If this is a volatile store, don't mess around with it. Just return the 353 // previous instruction as a clobber. 354 if (SI->isVolatile()) 355 LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos)); 356 else { 357 MemPtr = SI->getPointerOperand(); 358 MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType()); 359 } 360 } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) { 361 // If this is a volatile load, don't mess around with it. Just return the 362 // previous instruction as a clobber. 363 if (LI->isVolatile()) 364 LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos)); 365 else { 366 MemPtr = LI->getPointerOperand(); 367 MemSize = AA->getTypeStoreSize(LI->getType()); 368 } 369 } else if (isFreeCall(QueryInst)) { 370 MemPtr = QueryInst->getOperand(1); 371 // calls to free() erase the entire structure, not just a field. 372 MemSize = ~0UL; 373 } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) { 374 int IntrinsicID = 0; // Intrinsic IDs start at 1. 375 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst)) 376 IntrinsicID = II->getIntrinsicID(); 377 378 switch (IntrinsicID) { 379 case Intrinsic::lifetime_start: 380 case Intrinsic::lifetime_end: 381 case Intrinsic::invariant_start: 382 MemPtr = QueryInst->getOperand(2); 383 MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue(); 384 break; 385 case Intrinsic::invariant_end: 386 MemPtr = QueryInst->getOperand(3); 387 MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue(); 388 break; 389 default: 390 CallSite QueryCS = CallSite::get(QueryInst); 391 bool isReadOnly = AA->onlyReadsMemory(QueryCS); 392 LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos, 393 QueryParent); 394 } 395 } else { 396 // Non-memory instruction. 397 LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos)); 398 } 399 400 // If we need to do a pointer scan, make it happen. 401 if (MemPtr) { 402 bool isLoad = !QueryInst->mayWriteToMemory(); 403 if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) { 404 isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end; 405 } 406 LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos, 407 QueryParent); 408 } 409 410 // Remember the result! 411 if (Instruction *I = LocalCache.getInst()) 412 ReverseLocalDeps[I].insert(QueryInst); 413 414 return LocalCache; 415} 416 417#ifndef NDEBUG 418/// AssertSorted - This method is used when -debug is specified to verify that 419/// cache arrays are properly kept sorted. 420static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, 421 int Count = -1) { 422 if (Count == -1) Count = Cache.size(); 423 if (Count == 0) return; 424 425 for (unsigned i = 1; i != unsigned(Count); ++i) 426 assert(Cache[i-1] <= Cache[i] && "Cache isn't sorted!"); 427} 428#endif 429 430/// getNonLocalCallDependency - Perform a full dependency query for the 431/// specified call, returning the set of blocks that the value is 432/// potentially live across. The returned set of results will include a 433/// "NonLocal" result for all blocks where the value is live across. 434/// 435/// This method assumes the instruction returns a "NonLocal" dependency 436/// within its own block. 437/// 438/// This returns a reference to an internal data structure that may be 439/// invalidated on the next non-local query or when an instruction is 440/// removed. Clients must copy this data if they want it around longer than 441/// that. 442const MemoryDependenceAnalysis::NonLocalDepInfo & 443MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { 444 assert(getDependency(QueryCS.getInstruction()).isNonLocal() && 445 "getNonLocalCallDependency should only be used on calls with non-local deps!"); 446 PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()]; 447 NonLocalDepInfo &Cache = CacheP.first; 448 449 /// DirtyBlocks - This is the set of blocks that need to be recomputed. In 450 /// the cached case, this can happen due to instructions being deleted etc. In 451 /// the uncached case, this starts out as the set of predecessors we care 452 /// about. 453 SmallVector<BasicBlock*, 32> DirtyBlocks; 454 455 if (!Cache.empty()) { 456 // Okay, we have a cache entry. If we know it is not dirty, just return it 457 // with no computation. 458 if (!CacheP.second) { 459 NumCacheNonLocal++; 460 return Cache; 461 } 462 463 // If we already have a partially computed set of results, scan them to 464 // determine what is dirty, seeding our initial DirtyBlocks worklist. 465 for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end(); 466 I != E; ++I) 467 if (I->second.isDirty()) 468 DirtyBlocks.push_back(I->first); 469 470 // Sort the cache so that we can do fast binary search lookups below. 471 std::sort(Cache.begin(), Cache.end()); 472 473 ++NumCacheDirtyNonLocal; 474 //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: " 475 // << Cache.size() << " cached: " << *QueryInst; 476 } else { 477 // Seed DirtyBlocks with each of the preds of QueryInst's block. 478 BasicBlock *QueryBB = QueryCS.getInstruction()->getParent(); 479 for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI) 480 DirtyBlocks.push_back(*PI); 481 NumUncacheNonLocal++; 482 } 483 484 // isReadonlyCall - If this is a read-only call, we can be more aggressive. 485 bool isReadonlyCall = AA->onlyReadsMemory(QueryCS); 486 487 SmallPtrSet<BasicBlock*, 64> Visited; 488 489 unsigned NumSortedEntries = Cache.size(); 490 DEBUG(AssertSorted(Cache)); 491 492 // Iterate while we still have blocks to update. 493 while (!DirtyBlocks.empty()) { 494 BasicBlock *DirtyBB = DirtyBlocks.back(); 495 DirtyBlocks.pop_back(); 496 497 // Already processed this block? 498 if (!Visited.insert(DirtyBB)) 499 continue; 500 501 // Do a binary search to see if we already have an entry for this block in 502 // the cache set. If so, find it. 503 DEBUG(AssertSorted(Cache, NumSortedEntries)); 504 NonLocalDepInfo::iterator Entry = 505 std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries, 506 std::make_pair(DirtyBB, MemDepResult())); 507 if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB) 508 --Entry; 509 510 MemDepResult *ExistingResult = 0; 511 if (Entry != Cache.begin()+NumSortedEntries && 512 Entry->first == DirtyBB) { 513 // If we already have an entry, and if it isn't already dirty, the block 514 // is done. 515 if (!Entry->second.isDirty()) 516 continue; 517 518 // Otherwise, remember this slot so we can update the value. 519 ExistingResult = &Entry->second; 520 } 521 522 // If the dirty entry has a pointer, start scanning from it so we don't have 523 // to rescan the entire block. 524 BasicBlock::iterator ScanPos = DirtyBB->end(); 525 if (ExistingResult) { 526 if (Instruction *Inst = ExistingResult->getInst()) { 527 ScanPos = Inst; 528 // We're removing QueryInst's use of Inst. 529 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, 530 QueryCS.getInstruction()); 531 } 532 } 533 534 // Find out if this block has a local dependency for QueryInst. 535 MemDepResult Dep; 536 537 if (ScanPos != DirtyBB->begin()) { 538 Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB); 539 } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) { 540 // No dependence found. If this is the entry block of the function, it is 541 // a clobber, otherwise it is non-local. 542 Dep = MemDepResult::getNonLocal(); 543 } else { 544 Dep = MemDepResult::getClobber(ScanPos); 545 } 546 547 // If we had a dirty entry for the block, update it. Otherwise, just add 548 // a new entry. 549 if (ExistingResult) 550 *ExistingResult = Dep; 551 else 552 Cache.push_back(std::make_pair(DirtyBB, Dep)); 553 554 // If the block has a dependency (i.e. it isn't completely transparent to 555 // the value), remember the association! 556 if (!Dep.isNonLocal()) { 557 // Keep the ReverseNonLocalDeps map up to date so we can efficiently 558 // update this when we remove instructions. 559 if (Instruction *Inst = Dep.getInst()) 560 ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction()); 561 } else { 562 563 // If the block *is* completely transparent to the load, we need to check 564 // the predecessors of this block. Add them to our worklist. 565 for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI) 566 DirtyBlocks.push_back(*PI); 567 } 568 } 569 570 return Cache; 571} 572 573/// getNonLocalPointerDependency - Perform a full dependency query for an 574/// access to the specified (non-volatile) memory location, returning the 575/// set of instructions that either define or clobber the value. 576/// 577/// This method assumes the pointer has a "NonLocal" dependency within its 578/// own block. 579/// 580void MemoryDependenceAnalysis:: 581getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB, 582 SmallVectorImpl<NonLocalDepEntry> &Result) { 583 assert(isa<PointerType>(Pointer->getType()) && 584 "Can't get pointer deps of a non-pointer!"); 585 Result.clear(); 586 587 // We know that the pointer value is live into FromBB find the def/clobbers 588 // from presecessors. 589 const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType(); 590 uint64_t PointeeSize = AA->getTypeStoreSize(EltTy); 591 592 // This is the set of blocks we've inspected, and the pointer we consider in 593 // each block. Because of critical edges, we currently bail out if querying 594 // a block with multiple different pointers. This can happen during PHI 595 // translation. 596 DenseMap<BasicBlock*, Value*> Visited; 597 if (!getNonLocalPointerDepFromBB(Pointer, PointeeSize, isLoad, FromBB, 598 Result, Visited, true)) 599 return; 600 Result.clear(); 601 Result.push_back(std::make_pair(FromBB, 602 MemDepResult::getClobber(FromBB->begin()))); 603} 604 605/// GetNonLocalInfoForBlock - Compute the memdep value for BB with 606/// Pointer/PointeeSize using either cached information in Cache or by doing a 607/// lookup (which may use dirty cache info if available). If we do a lookup, 608/// add the result to the cache. 609MemDepResult MemoryDependenceAnalysis:: 610GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize, 611 bool isLoad, BasicBlock *BB, 612 NonLocalDepInfo *Cache, unsigned NumSortedEntries) { 613 614 // Do a binary search to see if we already have an entry for this block in 615 // the cache set. If so, find it. 616 NonLocalDepInfo::iterator Entry = 617 std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries, 618 std::make_pair(BB, MemDepResult())); 619 if (Entry != Cache->begin() && prior(Entry)->first == BB) 620 --Entry; 621 622 MemDepResult *ExistingResult = 0; 623 if (Entry != Cache->begin()+NumSortedEntries && Entry->first == BB) 624 ExistingResult = &Entry->second; 625 626 // If we have a cached entry, and it is non-dirty, use it as the value for 627 // this dependency. 628 if (ExistingResult && !ExistingResult->isDirty()) { 629 ++NumCacheNonLocalPtr; 630 return *ExistingResult; 631 } 632 633 // Otherwise, we have to scan for the value. If we have a dirty cache 634 // entry, start scanning from its position, otherwise we scan from the end 635 // of the block. 636 BasicBlock::iterator ScanPos = BB->end(); 637 if (ExistingResult && ExistingResult->getInst()) { 638 assert(ExistingResult->getInst()->getParent() == BB && 639 "Instruction invalidated?"); 640 ++NumCacheDirtyNonLocalPtr; 641 ScanPos = ExistingResult->getInst(); 642 643 // Eliminating the dirty entry from 'Cache', so update the reverse info. 644 ValueIsLoadPair CacheKey(Pointer, isLoad); 645 RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey); 646 } else { 647 ++NumUncacheNonLocalPtr; 648 } 649 650 // Scan the block for the dependency. 651 MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad, 652 ScanPos, BB); 653 654 // If we had a dirty entry for the block, update it. Otherwise, just add 655 // a new entry. 656 if (ExistingResult) 657 *ExistingResult = Dep; 658 else 659 Cache->push_back(std::make_pair(BB, Dep)); 660 661 // If the block has a dependency (i.e. it isn't completely transparent to 662 // the value), remember the reverse association because we just added it 663 // to Cache! 664 if (Dep.isNonLocal()) 665 return Dep; 666 667 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently 668 // update MemDep when we remove instructions. 669 Instruction *Inst = Dep.getInst(); 670 assert(Inst && "Didn't depend on anything?"); 671 ValueIsLoadPair CacheKey(Pointer, isLoad); 672 ReverseNonLocalPtrDeps[Inst].insert(CacheKey); 673 return Dep; 674} 675 676/// SortNonLocalDepInfoCache - Sort the a NonLocalDepInfo cache, given a certain 677/// number of elements in the array that are already properly ordered. This is 678/// optimized for the case when only a few entries are added. 679static void 680SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, 681 unsigned NumSortedEntries) { 682 switch (Cache.size() - NumSortedEntries) { 683 case 0: 684 // done, no new entries. 685 break; 686 case 2: { 687 // Two new entries, insert the last one into place. 688 MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back(); 689 Cache.pop_back(); 690 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry = 691 std::upper_bound(Cache.begin(), Cache.end()-1, Val); 692 Cache.insert(Entry, Val); 693 // FALL THROUGH. 694 } 695 case 1: 696 // One new entry, Just insert the new value at the appropriate position. 697 if (Cache.size() != 1) { 698 MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back(); 699 Cache.pop_back(); 700 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry = 701 std::upper_bound(Cache.begin(), Cache.end(), Val); 702 Cache.insert(Entry, Val); 703 } 704 break; 705 default: 706 // Added many values, do a full scale sort. 707 std::sort(Cache.begin(), Cache.end()); 708 break; 709 } 710} 711 712/// isPHITranslatable - Return true if the specified computation is derived from 713/// a PHI node in the current block and if it is simple enough for us to handle. 714static bool isPHITranslatable(Instruction *Inst) { 715 if (isa<PHINode>(Inst)) 716 return true; 717 718 // We can handle bitcast of a PHI, but the PHI needs to be in the same block 719 // as the bitcast. 720 if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) { 721 Instruction *OpI = dyn_cast<Instruction>(BC->getOperand(0)); 722 if (OpI == 0 || OpI->getParent() != Inst->getParent()) 723 return true; 724 return isPHITranslatable(OpI); 725 } 726 727 // We can translate a GEP if all of its operands defined in this block are phi 728 // translatable. 729 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) { 730 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) { 731 Instruction *OpI = dyn_cast<Instruction>(GEP->getOperand(i)); 732 if (OpI == 0 || OpI->getParent() != Inst->getParent()) 733 continue; 734 735 if (!isPHITranslatable(OpI)) 736 return false; 737 } 738 return true; 739 } 740 741 if (Inst->getOpcode() == Instruction::Add && 742 isa<ConstantInt>(Inst->getOperand(1))) { 743 Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0)); 744 if (OpI == 0 || OpI->getParent() != Inst->getParent()) 745 return true; 746 return isPHITranslatable(OpI); 747 } 748 749 // cerr << "MEMDEP: Could not PHI translate: " << *Pointer; 750 // if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst)) 751 // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0); 752 753 return false; 754} 755 756/// GetPHITranslatedValue - Given a computation that satisfied the 757/// isPHITranslatable predicate, see if we can translate the computation into 758/// the specified predecessor block. If so, return that value. 759Value *MemoryDependenceAnalysis:: 760GetPHITranslatedValue(Value *InVal, BasicBlock *CurBB, BasicBlock *Pred, 761 const TargetData *TD) const { 762 // If the input value is not an instruction, or if it is not defined in CurBB, 763 // then we don't need to phi translate it. 764 Instruction *Inst = dyn_cast<Instruction>(InVal); 765 if (Inst == 0 || Inst->getParent() != CurBB) 766 return InVal; 767 768 if (PHINode *PN = dyn_cast<PHINode>(Inst)) 769 return PN->getIncomingValueForBlock(Pred); 770 771 // Handle bitcast of PHI. 772 if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) { 773 // PHI translate the input operand. 774 Value *PHIIn = GetPHITranslatedValue(BC->getOperand(0), CurBB, Pred, TD); 775 if (PHIIn == 0) return 0; 776 777 // Constants are trivial to phi translate. 778 if (Constant *C = dyn_cast<Constant>(PHIIn)) 779 return ConstantExpr::getBitCast(C, BC->getType()); 780 781 // Otherwise we have to see if a bitcasted version of the incoming pointer 782 // is available. If so, we can use it, otherwise we have to fail. 783 for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end(); 784 UI != E; ++UI) { 785 if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) 786 if (BCI->getType() == BC->getType()) 787 return BCI; 788 } 789 return 0; 790 } 791 792 // Handle getelementptr with at least one PHI translatable operand. 793 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) { 794 SmallVector<Value*, 8> GEPOps; 795 BasicBlock *CurBB = GEP->getParent(); 796 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) { 797 Value *GEPOp = GEP->getOperand(i); 798 // No PHI translation is needed of operands whose values are live in to 799 // the predecessor block. 800 if (!isa<Instruction>(GEPOp) || 801 cast<Instruction>(GEPOp)->getParent() != CurBB) { 802 GEPOps.push_back(GEPOp); 803 continue; 804 } 805 806 // If the operand is a phi node, do phi translation. 807 Value *InOp = GetPHITranslatedValue(GEPOp, CurBB, Pred, TD); 808 if (InOp == 0) return 0; 809 810 GEPOps.push_back(InOp); 811 } 812 813 // Simplify the GEP to handle 'gep x, 0' -> x etc. 814 if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD)) 815 return V; 816 817 // Scan to see if we have this GEP available. 818 Value *APHIOp = GEPOps[0]; 819 for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end(); 820 UI != E; ++UI) { 821 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) 822 if (GEPI->getType() == GEP->getType() && 823 GEPI->getNumOperands() == GEPOps.size() && 824 GEPI->getParent()->getParent() == CurBB->getParent()) { 825 bool Mismatch = false; 826 for (unsigned i = 0, e = GEPOps.size(); i != e; ++i) 827 if (GEPI->getOperand(i) != GEPOps[i]) { 828 Mismatch = true; 829 break; 830 } 831 if (!Mismatch) 832 return GEPI; 833 } 834 } 835 return 0; 836 } 837 838 // Handle add with a constant RHS. 839 if (Inst->getOpcode() == Instruction::Add && 840 isa<ConstantInt>(Inst->getOperand(1))) { 841 // PHI translate the LHS. 842 Value *LHS; 843 Constant *RHS = cast<ConstantInt>(Inst->getOperand(1)); 844 Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0)); 845 bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap(); 846 bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap(); 847 848 if (OpI == 0 || OpI->getParent() != Inst->getParent()) 849 LHS = Inst->getOperand(0); 850 else { 851 LHS = GetPHITranslatedValue(Inst->getOperand(0), CurBB, Pred, TD); 852 if (LHS == 0) 853 return 0; 854 } 855 856 // If the PHI translated LHS is an add of a constant, fold the immediates. 857 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS)) 858 if (BOp->getOpcode() == Instruction::Add) 859 if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) { 860 LHS = BOp->getOperand(0); 861 RHS = ConstantExpr::getAdd(RHS, CI); 862 isNSW = isNUW = false; 863 } 864 865 // See if the add simplifies away. 866 if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD)) 867 return Res; 868 869 // Otherwise, see if we have this add available somewhere. 870 for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end(); 871 UI != E; ++UI) { 872 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI)) 873 if (BO->getOperand(0) == LHS && BO->getOperand(1) == RHS && 874 BO->getParent()->getParent() == CurBB->getParent()) 875 return BO; 876 } 877 878 return 0; 879 } 880 881 return 0; 882} 883 884/// GetAvailablePHITranslatePointer - Return the value computed by 885/// PHITranslatePointer if it dominates PredBB, otherwise return null. 886Value *MemoryDependenceAnalysis:: 887GetAvailablePHITranslatedValue(Value *V, 888 BasicBlock *CurBB, BasicBlock *PredBB, 889 const TargetData *TD, 890 const DominatorTree &DT) const { 891 // See if PHI translation succeeds. 892 V = GetPHITranslatedValue(V, CurBB, PredBB, TD); 893 if (V == 0) return 0; 894 895 // Make sure the value is live in the predecessor. 896 if (Instruction *Inst = dyn_cast_or_null<Instruction>(V)) 897 if (!DT.dominates(Inst->getParent(), PredBB)) 898 return 0; 899 return V; 900} 901 902 903/// InsertPHITranslatedPointer - Insert a computation of the PHI translated 904/// version of 'V' for the edge PredBB->CurBB into the end of the PredBB 905/// block. All newly created instructions are added to the NewInsts list. 906/// 907Value *MemoryDependenceAnalysis:: 908InsertPHITranslatedPointer(Value *InVal, BasicBlock *CurBB, 909 BasicBlock *PredBB, const TargetData *TD, 910 const DominatorTree &DT, 911 SmallVectorImpl<Instruction*> &NewInsts) const { 912 // See if we have a version of this value already available and dominating 913 // PredBB. If so, there is no need to insert a new copy. 914 if (Value *Res = GetAvailablePHITranslatedValue(InVal, CurBB, PredBB, TD, DT)) 915 return Res; 916 917 // If we don't have an available version of this value, it must be an 918 // instruction. 919 Instruction *Inst = cast<Instruction>(InVal); 920 921 // Handle bitcast of PHI translatable value. 922 if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) { 923 Value *OpVal = InsertPHITranslatedPointer(BC->getOperand(0), 924 CurBB, PredBB, TD, DT, NewInsts); 925 if (OpVal == 0) return 0; 926 927 // Otherwise insert a bitcast at the end of PredBB. 928 BitCastInst *New = new BitCastInst(OpVal, InVal->getType(), 929 InVal->getName()+".phi.trans.insert", 930 PredBB->getTerminator()); 931 NewInsts.push_back(New); 932 return New; 933 } 934 935 // Handle getelementptr with at least one PHI operand. 936 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) { 937 SmallVector<Value*, 8> GEPOps; 938 BasicBlock *CurBB = GEP->getParent(); 939 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) { 940 Value *OpVal = InsertPHITranslatedPointer(GEP->getOperand(i), 941 CurBB, PredBB, TD, DT, NewInsts); 942 if (OpVal == 0) return 0; 943 GEPOps.push_back(OpVal); 944 } 945 946 GetElementPtrInst *Result = 947 GetElementPtrInst::Create(GEPOps[0], GEPOps.begin()+1, GEPOps.end(), 948 InVal->getName()+".phi.trans.insert", 949 PredBB->getTerminator()); 950 Result->setIsInBounds(GEP->isInBounds()); 951 NewInsts.push_back(Result); 952 return Result; 953 } 954 955#if 0 956 // FIXME: This code works, but it is unclear that we actually want to insert 957 // a big chain of computation in order to make a value available in a block. 958 // This needs to be evaluated carefully to consider its cost trade offs. 959 960 // Handle add with a constant RHS. 961 if (Inst->getOpcode() == Instruction::Add && 962 isa<ConstantInt>(Inst->getOperand(1))) { 963 // PHI translate the LHS. 964 Value *OpVal = InsertPHITranslatedPointer(Inst->getOperand(0), 965 CurBB, PredBB, TD, DT, NewInsts); 966 if (OpVal == 0) return 0; 967 968 BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1), 969 InVal->getName()+".phi.trans.insert", 970 PredBB->getTerminator()); 971 Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap()); 972 Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap()); 973 NewInsts.push_back(Res); 974 return Res; 975 } 976#endif 977 978 return 0; 979} 980 981/// getNonLocalPointerDepFromBB - Perform a dependency query based on 982/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def 983/// results to the results vector and keep track of which blocks are visited in 984/// 'Visited'. 985/// 986/// This has special behavior for the first block queries (when SkipFirstBlock 987/// is true). In this special case, it ignores the contents of the specified 988/// block and starts returning dependence info for its predecessors. 989/// 990/// This function returns false on success, or true to indicate that it could 991/// not compute dependence information for some reason. This should be treated 992/// as a clobber dependence on the first instruction in the predecessor block. 993bool MemoryDependenceAnalysis:: 994getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize, 995 bool isLoad, BasicBlock *StartBB, 996 SmallVectorImpl<NonLocalDepEntry> &Result, 997 DenseMap<BasicBlock*, Value*> &Visited, 998 bool SkipFirstBlock) { 999 1000 // Look up the cached info for Pointer. 1001 ValueIsLoadPair CacheKey(Pointer, isLoad); 1002 1003 std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo = 1004 &NonLocalPointerDeps[CacheKey]; 1005 NonLocalDepInfo *Cache = &CacheInfo->second; 1006 1007 // If we have valid cached information for exactly the block we are 1008 // investigating, just return it with no recomputation. 1009 if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) { 1010 // We have a fully cached result for this query then we can just return the 1011 // cached results and populate the visited set. However, we have to verify 1012 // that we don't already have conflicting results for these blocks. Check 1013 // to ensure that if a block in the results set is in the visited set that 1014 // it was for the same pointer query. 1015 if (!Visited.empty()) { 1016 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); 1017 I != E; ++I) { 1018 DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->first); 1019 if (VI == Visited.end() || VI->second == Pointer) continue; 1020 1021 // We have a pointer mismatch in a block. Just return clobber, saying 1022 // that something was clobbered in this result. We could also do a 1023 // non-fully cached query, but there is little point in doing this. 1024 return true; 1025 } 1026 } 1027 1028 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); 1029 I != E; ++I) { 1030 Visited.insert(std::make_pair(I->first, Pointer)); 1031 if (!I->second.isNonLocal()) 1032 Result.push_back(*I); 1033 } 1034 ++NumCacheCompleteNonLocalPtr; 1035 return false; 1036 } 1037 1038 // Otherwise, either this is a new block, a block with an invalid cache 1039 // pointer or one that we're about to invalidate by putting more info into it 1040 // than its valid cache info. If empty, the result will be valid cache info, 1041 // otherwise it isn't. 1042 if (Cache->empty()) 1043 CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock); 1044 else 1045 CacheInfo->first = BBSkipFirstBlockPair(); 1046 1047 SmallVector<BasicBlock*, 32> Worklist; 1048 Worklist.push_back(StartBB); 1049 1050 // Keep track of the entries that we know are sorted. Previously cached 1051 // entries will all be sorted. The entries we add we only sort on demand (we 1052 // don't insert every element into its sorted position). We know that we 1053 // won't get any reuse from currently inserted values, because we don't 1054 // revisit blocks after we insert info for them. 1055 unsigned NumSortedEntries = Cache->size(); 1056 DEBUG(AssertSorted(*Cache)); 1057 1058 while (!Worklist.empty()) { 1059 BasicBlock *BB = Worklist.pop_back_val(); 1060 1061 // Skip the first block if we have it. 1062 if (!SkipFirstBlock) { 1063 // Analyze the dependency of *Pointer in FromBB. See if we already have 1064 // been here. 1065 assert(Visited.count(BB) && "Should check 'visited' before adding to WL"); 1066 1067 // Get the dependency info for Pointer in BB. If we have cached 1068 // information, we will use it, otherwise we compute it. 1069 DEBUG(AssertSorted(*Cache, NumSortedEntries)); 1070 MemDepResult Dep = GetNonLocalInfoForBlock(Pointer, PointeeSize, isLoad, 1071 BB, Cache, NumSortedEntries); 1072 1073 // If we got a Def or Clobber, add this to the list of results. 1074 if (!Dep.isNonLocal()) { 1075 Result.push_back(NonLocalDepEntry(BB, Dep)); 1076 continue; 1077 } 1078 } 1079 1080 // If 'Pointer' is an instruction defined in this block, then we need to do 1081 // phi translation to change it into a value live in the predecessor block. 1082 // If phi translation fails, then we can't continue dependence analysis. 1083 Instruction *PtrInst = dyn_cast<Instruction>(Pointer); 1084 bool NeedsPHITranslation = PtrInst && PtrInst->getParent() == BB; 1085 1086 // If no PHI translation is needed, just add all the predecessors of this 1087 // block to scan them as well. 1088 if (!NeedsPHITranslation) { 1089 SkipFirstBlock = false; 1090 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) { 1091 // Verify that we haven't looked at this block yet. 1092 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool> 1093 InsertRes = Visited.insert(std::make_pair(*PI, Pointer)); 1094 if (InsertRes.second) { 1095 // First time we've looked at *PI. 1096 Worklist.push_back(*PI); 1097 continue; 1098 } 1099 1100 // If we have seen this block before, but it was with a different 1101 // pointer then we have a phi translation failure and we have to treat 1102 // this as a clobber. 1103 if (InsertRes.first->second != Pointer) 1104 goto PredTranslationFailure; 1105 } 1106 continue; 1107 } 1108 1109 // If we do need to do phi translation, then there are a bunch of different 1110 // cases, because we have to find a Value* live in the predecessor block. We 1111 // know that PtrInst is defined in this block at least. 1112 1113 // We may have added values to the cache list before this PHI translation. 1114 // If so, we haven't done anything to ensure that the cache remains sorted. 1115 // Sort it now (if needed) so that recursive invocations of 1116 // getNonLocalPointerDepFromBB and other routines that could reuse the cache 1117 // value will only see properly sorted cache arrays. 1118 if (Cache && NumSortedEntries != Cache->size()) { 1119 SortNonLocalDepInfoCache(*Cache, NumSortedEntries); 1120 NumSortedEntries = Cache->size(); 1121 } 1122 1123 // If this is a computation derived from a PHI node, use the suitably 1124 // translated incoming values for each pred as the phi translated version. 1125 if (!isPHITranslatable(PtrInst)) 1126 goto PredTranslationFailure; 1127 1128 Cache = 0; 1129 1130 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) { 1131 BasicBlock *Pred = *PI; 1132 // Get the PHI translated pointer in this predecessor. This can fail and 1133 // return null if not translatable. 1134 Value *PredPtr = GetPHITranslatedValue(PtrInst, BB, Pred, TD); 1135 1136 // Check to see if we have already visited this pred block with another 1137 // pointer. If so, we can't do this lookup. This failure can occur 1138 // with PHI translation when a critical edge exists and the PHI node in 1139 // the successor translates to a pointer value different than the 1140 // pointer the block was first analyzed with. 1141 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool> 1142 InsertRes = Visited.insert(std::make_pair(Pred, PredPtr)); 1143 1144 if (!InsertRes.second) { 1145 // If the predecessor was visited with PredPtr, then we already did 1146 // the analysis and can ignore it. 1147 if (InsertRes.first->second == PredPtr) 1148 continue; 1149 1150 // Otherwise, the block was previously analyzed with a different 1151 // pointer. We can't represent the result of this case, so we just 1152 // treat this as a phi translation failure. 1153 goto PredTranslationFailure; 1154 } 1155 1156 // If PHI translation was unable to find an available pointer in this 1157 // predecessor, then we have to assume that the pointer is clobbered in 1158 // that predecessor. We can still do PRE of the load, which would insert 1159 // a computation of the pointer in this predecessor. 1160 if (PredPtr == 0) { 1161 // Add the entry to the Result list. 1162 NonLocalDepEntry Entry(Pred, 1163 MemDepResult::getClobber(Pred->getTerminator())); 1164 Result.push_back(Entry); 1165 1166 // Add it to the cache for this CacheKey so that subsequent queries get 1167 // this result. 1168 Cache = &NonLocalPointerDeps[CacheKey].second; 1169 MemoryDependenceAnalysis::NonLocalDepInfo::iterator It = 1170 std::upper_bound(Cache->begin(), Cache->end(), Entry); 1171 1172 if (It != Cache->begin() && prior(It)->first == Pred) 1173 --It; 1174 1175 if (It == Cache->end() || It->first != Pred) { 1176 Cache->insert(It, Entry); 1177 // Add it to the reverse map. 1178 ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); 1179 } else if (!It->second.isDirty()) { 1180 // noop 1181 } else if (It->second.getInst() == Pred->getTerminator()) { 1182 // Same instruction, clear the dirty marker. 1183 It->second = Entry.second; 1184 } else if (It->second.getInst() == 0) { 1185 // Dirty, with no instruction, just add this. 1186 It->second = Entry.second; 1187 ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); 1188 } else { 1189 // Otherwise, dirty with a different instruction. 1190 RemoveFromReverseMap(ReverseNonLocalPtrDeps, It->second.getInst(), 1191 CacheKey); 1192 It->second = Entry.second; 1193 ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); 1194 } 1195 Cache = 0; 1196 continue; 1197 } 1198 1199 // FIXME: it is entirely possible that PHI translating will end up with 1200 // the same value. Consider PHI translating something like: 1201 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need* 1202 // to recurse here, pedantically speaking. 1203 1204 // If we have a problem phi translating, fall through to the code below 1205 // to handle the failure condition. 1206 if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred, 1207 Result, Visited)) 1208 goto PredTranslationFailure; 1209 } 1210 1211 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated. 1212 CacheInfo = &NonLocalPointerDeps[CacheKey]; 1213 Cache = &CacheInfo->second; 1214 NumSortedEntries = Cache->size(); 1215 1216 // Since we did phi translation, the "Cache" set won't contain all of the 1217 // results for the query. This is ok (we can still use it to accelerate 1218 // specific block queries) but we can't do the fastpath "return all 1219 // results from the set" Clear out the indicator for this. 1220 CacheInfo->first = BBSkipFirstBlockPair(); 1221 SkipFirstBlock = false; 1222 continue; 1223 1224 PredTranslationFailure: 1225 1226 if (Cache == 0) { 1227 // Refresh the CacheInfo/Cache pointer if it got invalidated. 1228 CacheInfo = &NonLocalPointerDeps[CacheKey]; 1229 Cache = &CacheInfo->second; 1230 NumSortedEntries = Cache->size(); 1231 } 1232 1233 // Since we did phi translation, the "Cache" set won't contain all of the 1234 // results for the query. This is ok (we can still use it to accelerate 1235 // specific block queries) but we can't do the fastpath "return all 1236 // results from the set" Clear out the indicator for this. 1237 CacheInfo->first = BBSkipFirstBlockPair(); 1238 1239 // If *nothing* works, mark the pointer as being clobbered by the first 1240 // instruction in this block. 1241 // 1242 // If this is the magic first block, return this as a clobber of the whole 1243 // incoming value. Since we can't phi translate to one of the predecessors, 1244 // we have to bail out. 1245 if (SkipFirstBlock) 1246 return true; 1247 1248 for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) { 1249 assert(I != Cache->rend() && "Didn't find current block??"); 1250 if (I->first != BB) 1251 continue; 1252 1253 assert(I->second.isNonLocal() && 1254 "Should only be here with transparent block"); 1255 I->second = MemDepResult::getClobber(BB->begin()); 1256 ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey); 1257 Result.push_back(*I); 1258 break; 1259 } 1260 } 1261 1262 // Okay, we're done now. If we added new values to the cache, re-sort it. 1263 SortNonLocalDepInfoCache(*Cache, NumSortedEntries); 1264 DEBUG(AssertSorted(*Cache)); 1265 return false; 1266} 1267 1268/// RemoveCachedNonLocalPointerDependencies - If P exists in 1269/// CachedNonLocalPointerInfo, remove it. 1270void MemoryDependenceAnalysis:: 1271RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) { 1272 CachedNonLocalPointerInfo::iterator It = 1273 NonLocalPointerDeps.find(P); 1274 if (It == NonLocalPointerDeps.end()) return; 1275 1276 // Remove all of the entries in the BB->val map. This involves removing 1277 // instructions from the reverse map. 1278 NonLocalDepInfo &PInfo = It->second.second; 1279 1280 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) { 1281 Instruction *Target = PInfo[i].second.getInst(); 1282 if (Target == 0) continue; // Ignore non-local dep results. 1283 assert(Target->getParent() == PInfo[i].first); 1284 1285 // Eliminating the dirty entry from 'Cache', so update the reverse info. 1286 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P); 1287 } 1288 1289 // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo). 1290 NonLocalPointerDeps.erase(It); 1291} 1292 1293 1294/// invalidateCachedPointerInfo - This method is used to invalidate cached 1295/// information about the specified pointer, because it may be too 1296/// conservative in memdep. This is an optional call that can be used when 1297/// the client detects an equivalence between the pointer and some other 1298/// value and replaces the other value with ptr. This can make Ptr available 1299/// in more places that cached info does not necessarily keep. 1300void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) { 1301 // If Ptr isn't really a pointer, just ignore it. 1302 if (!isa<PointerType>(Ptr->getType())) return; 1303 // Flush store info for the pointer. 1304 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false)); 1305 // Flush load info for the pointer. 1306 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true)); 1307} 1308 1309/// removeInstruction - Remove an instruction from the dependence analysis, 1310/// updating the dependence of instructions that previously depended on it. 1311/// This method attempts to keep the cache coherent using the reverse map. 1312void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { 1313 // Walk through the Non-local dependencies, removing this one as the value 1314 // for any cached queries. 1315 NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst); 1316 if (NLDI != NonLocalDeps.end()) { 1317 NonLocalDepInfo &BlockMap = NLDI->second.first; 1318 for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end(); 1319 DI != DE; ++DI) 1320 if (Instruction *Inst = DI->second.getInst()) 1321 RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst); 1322 NonLocalDeps.erase(NLDI); 1323 } 1324 1325 // If we have a cached local dependence query for this instruction, remove it. 1326 // 1327 LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst); 1328 if (LocalDepEntry != LocalDeps.end()) { 1329 // Remove us from DepInst's reverse set now that the local dep info is gone. 1330 if (Instruction *Inst = LocalDepEntry->second.getInst()) 1331 RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst); 1332 1333 // Remove this local dependency info. 1334 LocalDeps.erase(LocalDepEntry); 1335 } 1336 1337 // If we have any cached pointer dependencies on this instruction, remove 1338 // them. If the instruction has non-pointer type, then it can't be a pointer 1339 // base. 1340 1341 // Remove it from both the load info and the store info. The instruction 1342 // can't be in either of these maps if it is non-pointer. 1343 if (isa<PointerType>(RemInst->getType())) { 1344 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false)); 1345 RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true)); 1346 } 1347 1348 // Loop over all of the things that depend on the instruction we're removing. 1349 // 1350 SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd; 1351 1352 // If we find RemInst as a clobber or Def in any of the maps for other values, 1353 // we need to replace its entry with a dirty version of the instruction after 1354 // it. If RemInst is a terminator, we use a null dirty value. 1355 // 1356 // Using a dirty version of the instruction after RemInst saves having to scan 1357 // the entire block to get to this point. 1358 MemDepResult NewDirtyVal; 1359 if (!RemInst->isTerminator()) 1360 NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst)); 1361 1362 ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst); 1363 if (ReverseDepIt != ReverseLocalDeps.end()) { 1364 SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second; 1365 // RemInst can't be the terminator if it has local stuff depending on it. 1366 assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) && 1367 "Nothing can locally depend on a terminator"); 1368 1369 for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(), 1370 E = ReverseDeps.end(); I != E; ++I) { 1371 Instruction *InstDependingOnRemInst = *I; 1372 assert(InstDependingOnRemInst != RemInst && 1373 "Already removed our local dep info"); 1374 1375 LocalDeps[InstDependingOnRemInst] = NewDirtyVal; 1376 1377 // Make sure to remember that new things depend on NewDepInst. 1378 assert(NewDirtyVal.getInst() && "There is no way something else can have " 1379 "a local dep on this if it is a terminator!"); 1380 ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(), 1381 InstDependingOnRemInst)); 1382 } 1383 1384 ReverseLocalDeps.erase(ReverseDepIt); 1385 1386 // Add new reverse deps after scanning the set, to avoid invalidating the 1387 // 'ReverseDeps' reference. 1388 while (!ReverseDepsToAdd.empty()) { 1389 ReverseLocalDeps[ReverseDepsToAdd.back().first] 1390 .insert(ReverseDepsToAdd.back().second); 1391 ReverseDepsToAdd.pop_back(); 1392 } 1393 } 1394 1395 ReverseDepIt = ReverseNonLocalDeps.find(RemInst); 1396 if (ReverseDepIt != ReverseNonLocalDeps.end()) { 1397 SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second; 1398 for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end(); 1399 I != E; ++I) { 1400 assert(*I != RemInst && "Already removed NonLocalDep info for RemInst"); 1401 1402 PerInstNLInfo &INLD = NonLocalDeps[*I]; 1403 // The information is now dirty! 1404 INLD.second = true; 1405 1406 for (NonLocalDepInfo::iterator DI = INLD.first.begin(), 1407 DE = INLD.first.end(); DI != DE; ++DI) { 1408 if (DI->second.getInst() != RemInst) continue; 1409 1410 // Convert to a dirty entry for the subsequent instruction. 1411 DI->second = NewDirtyVal; 1412 1413 if (Instruction *NextI = NewDirtyVal.getInst()) 1414 ReverseDepsToAdd.push_back(std::make_pair(NextI, *I)); 1415 } 1416 } 1417 1418 ReverseNonLocalDeps.erase(ReverseDepIt); 1419 1420 // Add new reverse deps after scanning the set, to avoid invalidating 'Set' 1421 while (!ReverseDepsToAdd.empty()) { 1422 ReverseNonLocalDeps[ReverseDepsToAdd.back().first] 1423 .insert(ReverseDepsToAdd.back().second); 1424 ReverseDepsToAdd.pop_back(); 1425 } 1426 } 1427 1428 // If the instruction is in ReverseNonLocalPtrDeps then it appears as a 1429 // value in the NonLocalPointerDeps info. 1430 ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt = 1431 ReverseNonLocalPtrDeps.find(RemInst); 1432 if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) { 1433 SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second; 1434 SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd; 1435 1436 for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(), 1437 E = Set.end(); I != E; ++I) { 1438 ValueIsLoadPair P = *I; 1439 assert(P.getPointer() != RemInst && 1440 "Already removed NonLocalPointerDeps info for RemInst"); 1441 1442 NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second; 1443 1444 // The cache is not valid for any specific block anymore. 1445 NonLocalPointerDeps[P].first = BBSkipFirstBlockPair(); 1446 1447 // Update any entries for RemInst to use the instruction after it. 1448 for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end(); 1449 DI != DE; ++DI) { 1450 if (DI->second.getInst() != RemInst) continue; 1451 1452 // Convert to a dirty entry for the subsequent instruction. 1453 DI->second = NewDirtyVal; 1454 1455 if (Instruction *NewDirtyInst = NewDirtyVal.getInst()) 1456 ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P)); 1457 } 1458 1459 // Re-sort the NonLocalDepInfo. Changing the dirty entry to its 1460 // subsequent value may invalidate the sortedness. 1461 std::sort(NLPDI.begin(), NLPDI.end()); 1462 } 1463 1464 ReverseNonLocalPtrDeps.erase(ReversePtrDepIt); 1465 1466 while (!ReversePtrDepsToAdd.empty()) { 1467 ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first] 1468 .insert(ReversePtrDepsToAdd.back().second); 1469 ReversePtrDepsToAdd.pop_back(); 1470 } 1471 } 1472 1473 1474 assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?"); 1475 AA->deleteValue(RemInst); 1476 DEBUG(verifyRemoved(RemInst)); 1477} 1478/// verifyRemoved - Verify that the specified instruction does not occur 1479/// in our internal data structures. 1480void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const { 1481 for (LocalDepMapType::const_iterator I = LocalDeps.begin(), 1482 E = LocalDeps.end(); I != E; ++I) { 1483 assert(I->first != D && "Inst occurs in data structures"); 1484 assert(I->second.getInst() != D && 1485 "Inst occurs in data structures"); 1486 } 1487 1488 for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(), 1489 E = NonLocalPointerDeps.end(); I != E; ++I) { 1490 assert(I->first.getPointer() != D && "Inst occurs in NLPD map key"); 1491 const NonLocalDepInfo &Val = I->second.second; 1492 for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end(); 1493 II != E; ++II) 1494 assert(II->second.getInst() != D && "Inst occurs as NLPD value"); 1495 } 1496 1497 for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(), 1498 E = NonLocalDeps.end(); I != E; ++I) { 1499 assert(I->first != D && "Inst occurs in data structures"); 1500 const PerInstNLInfo &INLD = I->second; 1501 for (NonLocalDepInfo::const_iterator II = INLD.first.begin(), 1502 EE = INLD.first.end(); II != EE; ++II) 1503 assert(II->second.getInst() != D && "Inst occurs in data structures"); 1504 } 1505 1506 for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(), 1507 E = ReverseLocalDeps.end(); I != E; ++I) { 1508 assert(I->first != D && "Inst occurs in data structures"); 1509 for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(), 1510 EE = I->second.end(); II != EE; ++II) 1511 assert(*II != D && "Inst occurs in data structures"); 1512 } 1513 1514 for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(), 1515 E = ReverseNonLocalDeps.end(); 1516 I != E; ++I) { 1517 assert(I->first != D && "Inst occurs in data structures"); 1518 for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(), 1519 EE = I->second.end(); II != EE; ++II) 1520 assert(*II != D && "Inst occurs in data structures"); 1521 } 1522 1523 for (ReverseNonLocalPtrDepTy::const_iterator 1524 I = ReverseNonLocalPtrDeps.begin(), 1525 E = ReverseNonLocalPtrDeps.end(); I != E; ++I) { 1526 assert(I->first != D && "Inst occurs in rev NLPD map"); 1527 1528 for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(), 1529 E = I->second.end(); II != E; ++II) 1530 assert(*II != ValueIsLoadPair(D, false) && 1531 *II != ValueIsLoadPair(D, true) && 1532 "Inst occurs in ReverseNonLocalPtrDeps map"); 1533 } 1534 1535} 1536