MemoryDependenceAnalysis.cpp revision 2eac9493fc68f9df248f1a615413be1d2016b4d6
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
                                     "Memory Dependence Analysis", false, true);

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(&ID), PredCache(0) {
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}


/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  if (PredCache == 0)
    PredCache.reset(new PredIteratorCache());
  return false;
}
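// Illustrative usage sketch (not part of this file): a client pass would
// typically request this analysis and query it like so; "MyPass" is a
// hypothetical name.
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemoryDependenceAnalysis>();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//     ...
//   }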
/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  // The self-assignment silences unused-variable warnings in release builds,
  // where the assert compiles away.
  assert(Found && "Invalid reverse map!"); Found=Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}


/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    Value *Pointer = 0;
    uint64_t PointerSize = 0;
    if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
      Pointer = S->getPointerOperand();
      PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType());
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
      Pointer = V->getOperand(0);
      PointerSize = AA->getTypeStoreSize(V->getType());
    } else if (isFreeCall(Inst)) {
      Pointer = Inst->getOperand(1);
      // calls to free() erase the entire structure
      PointerSize = ~0ULL;
    } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      CallSite InstCS = CallSite::get(Inst);
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls don't interact (e.g. InstCS is readnone) keep
        // scanning.
        continue;
      case AliasAnalysis::Ref:
        // If the two calls read the same memory locations and CS is a readonly
        // function, then we have two cases: 1) the calls may not interfere
        // with each other at all.  2) the calls may produce the same value.
        // In case #1 we want to ignore the values, in case #2, we want to
        // return Inst as a Def dependence.  This allows us to CSE in cases
        // like:
        //   X = strlen(P);
        //   memchr(...);
        //   Y = strlen(P);  // Y = X
        if (isReadOnlyCall) {
          if (CS.getCalledFunction() != 0 &&
              CS.getCalledFunction() == InstCS.getCalledFunction())
            return MemDepResult::getDef(Inst);
          // Ignore unrelated read/read call dependences.
          continue;
        }
        // FALL THROUGH
      default:
        return MemDepResult::getClobber(Inst);
      }
    } else {
      // Non-memory instruction.
      continue;
    }

    if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is a
  // clobber, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getClobber(ScanIt);
}
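// Illustrative IR sketch (assumed, not from this file) of the read/read case
// handled above, with strlen known readonly:
//
//   %x = call i32 @strlen(i8* %P)
//   %c = call i8* @memchr(i8* %Q, i32 0, i32 5)
//   %y = call i32 @strlen(i8* %P)
//
// Scanning backwards from %y, the memchr call is an unrelated read and is
// skipped, and the first strlen call is returned as a Def, letting a client
// CSE %y to %x.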
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases
/// with read-only operations.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB) {

  Value *invariantTag = 0;

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If we're in an invariant region, no dependencies can be found before
    // we pass an invariant-begin marker.
    if (invariantTag == Inst) {
      invariantTag = 0;
      continue;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we pass an invariant-end marker, then we've just entered an
      // invariant region and can start ignoring dependencies.
      if (II->getIntrinsicID() == Intrinsic::invariant_end) {
        uint64_t invariantSize = ~0ULL;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(2)))
          invariantSize = CI->getZExtValue();

        AliasAnalysis::AliasResult R =
          AA->alias(II->getOperand(3), invariantSize, MemPtr, MemSize);
        if (R == AliasAnalysis::MustAlias) {
          invariantTag = II->getOperand(1);
          continue;
        }

      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      } else if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
                 II->getIntrinsicID() == Intrinsic::lifetime_end) {
        uint64_t invariantSize = ~0ULL;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(1)))
          invariantSize = CI->getZExtValue();

        AliasAnalysis::AliasResult R =
          AA->alias(II->getOperand(2), invariantSize, MemPtr, MemSize);
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(II);
      }
    }

    // If we're querying on a load and we're in an invariant region, we're
    // done at this point.  Nothing a load depends on can live in an invariant
    // region.
    if (isLoad && invariantTag) continue;

    // Debug intrinsics don't cause dependences.
    if (isa<DbgInfoIntrinsic>(Inst)) continue;

    // A load query depends on another load only if the two pointers must
    // alias; stores are handled separately below.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      Value *Pointer = LI->getPointerOperand();
      uint64_t PointerSize = AA->getTypeStoreSize(LI->getType());

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R =
        AA->alias(Pointer, PointerSize, MemPtr, MemSize);
      if (R == AliasAnalysis::NoAlias)
        continue;

      // A may-alias relation between two loads does not establish a
      // dependence for a load query.
      if (isLoad && R == AliasAnalysis::MayAlias)
        continue;
      // Stores depend on may- and must-aliased loads; loads depend only on
      // must-aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // There can't be stores to the value we care about inside an
      // invariant region.
      if (invariantTag) continue;

      // If alias analysis can tell that this store is guaranteed to not
      // modify the query pointer, ignore it.  Use getModRefInfo to handle
      // cases where the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it
      // is a must alias: in this case, we want to return this as a def.
      Value *Pointer = SI->getPointerOperand();
      uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R =
        AA->alias(Pointer, PointerSize, MemPtr, MemSize);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MayAlias)
        return MemDepResult::getClobber(Inst);
      return MemDepResult::getDef(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is
    // to the allocation, return Def.  This means that there is no dependence
    // and the access can be optimized based on that.  For example, a load
    // could turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call,
    // not a subsequent bitcast of the malloc call result.  There can be
    // stores to the malloced memory between the malloc call and its bitcast
    // uses, and we need to continue scanning until the malloc call.
    if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {
      Value *AccessPtr = MemPtr->getUnderlyingObject();

      if (AccessPtr == Inst ||
          AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
    switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      // If we're in an invariant region, we can ignore calls that ONLY
      // modify the pointer.
      if (invariantTag) continue;
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      // FALL THROUGH.
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is a
  // clobber, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getClobber(ScanIt);
}
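// Illustrative IR sketch (assumed, not from this file) for the routine above,
// querying the load of %Q:
//
//   store i32 0, i32* %P
//   %v = load i32* %Q
//
// The store is returned as a Def if AA proves %P and %Q must-alias, as a
// Clobber if they may-alias, and is scanned past if they cannot alias.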
/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at
  // that instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  Value *MemPtr = 0;
  uint64_t MemSize = 0;

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // a clobber, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getClobber(QueryInst);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) {
    // If this is a volatile store, don't mess around with it.  Just return
    // the previous instruction as a clobber.
    if (SI->isVolatile())
      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
    else {
      MemPtr = SI->getPointerOperand();
      MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
    }
  } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
    // If this is a volatile load, don't mess around with it.  Just return the
    // previous instruction as a clobber.
    if (LI->isVolatile())
      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
    else {
      MemPtr = LI->getPointerOperand();
      MemSize = AA->getTypeStoreSize(LI->getType());
    }
  } else if (isFreeCall(QueryInst)) {
    MemPtr = QueryInst->getOperand(1);
    // calls to free() erase the entire structure, not just a field.
    // Note: use ~0ULL, not ~0UL, since MemSize is a uint64_t even on 32-bit
    // hosts (matches the free() handling in getCallSiteDependencyFrom).
    MemSize = ~0ULL;
  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
    int IntrinsicID = 0;  // Intrinsic IDs start at 1.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
      IntrinsicID = II->getIntrinsicID();

    switch (IntrinsicID) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      MemPtr = QueryInst->getOperand(2);
      MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
      break;
    case Intrinsic::invariant_end:
      MemPtr = QueryInst->getOperand(3);
      MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
      break;
    default:
      CallSite QueryCS = CallSite::get(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    }
  } else {
    // Non-memory instruction.
    LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
  }

  // If we need to do a pointer scan, make it happen.
  if (MemPtr) {
    bool isLoad = !QueryInst->mayWriteToMemory();
    if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
      isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
    }
    LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos,
                                          QueryParent);
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
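// Illustrative usage sketch (not part of this file): a client would branch on
// the three result kinds roughly as follows:
//
//   MemDepResult Res = MD.getDependency(QueryInst);
//   if (Res.isDef()) {
//     Instruction *DefInst = Res.getInst();   // e.g. a must-aliased store
//   } else if (Res.isClobber()) {
//     Instruction *Clobber = Res.getInst();   // may write the location
//   } else {
//     // isNonLocal(): resolve via the non-local query interfaces below.
//   }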
#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(Cache[i-1] <= Cache[i] && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is potentially
/// live across.  The returned set of results will include a "NonLocal" result
/// for each block the value is live across but has no local dependency in.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.
  /// In the uncached case, this starts out as the set of predecessors we
  /// care about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return
    // it with no computation.
    if (!CacheP.second) {
      NumCacheNonLocal++;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->second.isDirty())
        DirtyBlocks.push_back(I->first);

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    NumUncacheNonLocal++;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block
    // in the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       std::make_pair(DirtyBB, MemDepResult()));
    if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
      --Entry;

    MemDepResult *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->first == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the
      // block is done.
      if (!Entry->second.isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &Entry->second;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is a clobber, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getClobber(ScanPos);
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      *ExistingResult = Dep;
    else
      Cache.push_back(std::make_pair(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the value, we need to
      // check the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepEntry> &Result) {
  assert(isa<PointerType>(Pointer->getType()) &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // We know that the pointer value is live into FromBB; find the def/clobbers
  // in its predecessors.
  const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
  uint64_t PointeeSize = AA->getTypeStoreSize(EltTy);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Pointer, PointeeSize, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(std::make_pair(FromBB,
                                  MemDepResult::getClobber(FromBB->begin())));
}
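// Illustrative usage sketch (not part of this file): resolving the non-local
// dependencies of a load "LI" whose local query returned NonLocal:
//
//   SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
//   MD.getNonLocalPointerDependency(LI->getPointerOperand(), /*isLoad*/true,
//                                   LI->getParent(), Deps);
//   for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
//     BasicBlock *BB = Deps[i].first;     // block holding the dependency
//     MemDepResult Res = Deps[i].second;  // def or clobber within BB
//   }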
/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     std::make_pair(BB, MemDepResult()));
  if (Entry != Cache->begin() && prior(Entry)->first == BB)
    --Entry;

  MemDepResult *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->first == BB)
    ExistingResult = &Entry->second;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->isDirty()) {
    ++NumCacheNonLocalPtr;
    return *ExistingResult;
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getInst()) {
    assert(ExistingResult->getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Pointer, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad,
                                              ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    *ExistingResult = Dep;
  else
    Cache->push_back(std::make_pair(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (Dep.isNonLocal())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Pointer, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This
/// is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}

/// isPHITranslatable - Return true if the specified computation is derived
/// from a PHI node in the current block and if it is simple enough for us to
/// handle.
static bool isPHITranslatable(Instruction *Inst) {
  if (isa<PHINode>(Inst))
    return true;

  // We can handle bitcast of a PHI, but the PHI needs to be in the same block
  // as the bitcast.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
    Instruction *OpI = dyn_cast<Instruction>(BC->getOperand(0));
    if (OpI == 0 || OpI->getParent() != Inst->getParent())
      return true;
    return isPHITranslatable(OpI);
  }

  // We can translate a GEP if all of its operands defined in this block are
  // phi translatable.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
    for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
      Instruction *OpI = dyn_cast<Instruction>(GEP->getOperand(i));
      if (OpI == 0 || OpI->getParent() != Inst->getParent())
        continue;

      if (!isPHITranslatable(OpI))
        return false;
    }
    return true;
  }

  // We can handle an add with a constant RHS if the LHS is phi translatable.
  if (Inst->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Inst->getOperand(1))) {
    Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
    if (OpI == 0 || OpI->getParent() != Inst->getParent())
      return true;
    return isPHITranslatable(OpI);
  }

  //   cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
  //   if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
  //     cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);

  return false;
}
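// Illustrative IR sketch (assumed, not from this file) of what the predicate
// above accepts:
//
//   Pred1:                          Pred2:
//     br label %Join                  br label %Join
//   Join:
//     %p = phi i8* [ %a, %Pred1 ], [ %b, %Pred2 ]
//     %c = bitcast i8* %p to i32*
//
// %c is PHI translatable: translating it into Pred1 means finding an existing
// "bitcast i8* %a to i32*" that is available in that predecessor.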
/// GetPHITranslatedValue - Given a computation that satisfies the
/// isPHITranslatable predicate, see if we can translate the computation into
/// the specified predecessor block.  If so, return that value.
Value *MemoryDependenceAnalysis::
GetPHITranslatedValue(Value *InVal, BasicBlock *CurBB, BasicBlock *Pred,
                      const TargetData *TD) const {
  // If the input value is not an instruction, or if it is not defined in
  // CurBB, then we don't need to phi translate it.
  Instruction *Inst = dyn_cast<Instruction>(InVal);
  if (Inst == 0 || Inst->getParent() != CurBB)
    return InVal;

  if (PHINode *PN = dyn_cast<PHINode>(Inst))
    return PN->getIncomingValueForBlock(Pred);

  // Handle bitcast of PHI.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
    // PHI translate the input operand.
    Value *PHIIn = GetPHITranslatedValue(BC->getOperand(0), CurBB, Pred, TD);
    if (PHIIn == 0) return 0;

    // Constants are trivial to phi translate.
    if (Constant *C = dyn_cast<Constant>(PHIIn))
      return ConstantExpr::getBitCast(C, BC->getType());

    // Otherwise we have to see if a bitcasted version of the incoming pointer
    // is available.  If so, we can use it, otherwise we have to fail.
    for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
         UI != E; ++UI) {
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI))
        if (BCI->getType() == BC->getType())
          return BCI;
    }
    return 0;
  }

  // Handle getelementptr with at least one PHI translatable operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
    SmallVector<Value*, 8> GEPOps;
    BasicBlock *CurBB = GEP->getParent();
    for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
      Value *GEPOp = GEP->getOperand(i);
      // No PHI translation is needed for operands whose values are live in
      // to the predecessor block.
      if (!isa<Instruction>(GEPOp) ||
          cast<Instruction>(GEPOp)->getParent() != CurBB) {
        GEPOps.push_back(GEPOp);
        continue;
      }

      // If the operand is a phi node, do phi translation.
      Value *InOp = GetPHITranslatedValue(GEPOp, CurBB, Pred, TD);
      if (InOp == 0) return 0;

      GEPOps.push_back(InOp);
    }

    // Simplify the GEP to handle 'gep x, 0' -> x etc.
    if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD))
      return V;

    // Scan to see if we have this GEP available.
    Value *APHIOp = GEPOps[0];
    for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
         UI != E; ++UI) {
      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI))
        if (GEPI->getType() == GEP->getType() &&
            GEPI->getNumOperands() == GEPOps.size() &&
            GEPI->getParent()->getParent() == CurBB->getParent()) {
          bool Mismatch = false;
          for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
            if (GEPI->getOperand(i) != GEPOps[i]) {
              Mismatch = true;
              break;
            }
          if (!Mismatch)
            return GEPI;
        }
    }
    return 0;
  }

  // Handle add with a constant RHS.
  if (Inst->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Inst->getOperand(1))) {
    // PHI translate the LHS.
    Value *LHS;
    Constant *RHS = cast<ConstantInt>(Inst->getOperand(1));
    Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
    bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap();
    bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();

    if (OpI == 0 || OpI->getParent() != Inst->getParent())
      LHS = Inst->getOperand(0);
    else {
      LHS = GetPHITranslatedValue(Inst->getOperand(0), CurBB, Pred, TD);
      if (LHS == 0)
        return 0;
    }

    // If the PHI translated LHS is an add of a constant, fold the immediates.
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
      if (BOp->getOpcode() == Instruction::Add)
        if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
          LHS = BOp->getOperand(0);
          RHS = ConstantExpr::getAdd(RHS, CI);
          isNSW = isNUW = false;
        }

    // See if the add simplifies away.
    if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD))
      return Res;

    // Otherwise, see if we have this add available somewhere.
    for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
         UI != E; ++UI) {
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI))
        if (BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
            BO->getParent()->getParent() == CurBB->getParent())
          return BO;
    }

    return 0;
  }

  return 0;
}
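// Illustrative sketch (assumed, not from this file) of the immediate folding
// above: if "%i = add i64 %j, 4" is being translated and %j's translation in
// the predecessor is itself "add i64 %k, 8", the two constants are folded and
// the scan then looks for an existing "add i64 %k, 12" in the function.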
/// GetAvailablePHITranslatedValue - Return the value computed by
/// GetPHITranslatedValue if it dominates PredBB, otherwise return null.
Value *MemoryDependenceAnalysis::
GetAvailablePHITranslatedValue(Value *V,
                               BasicBlock *CurBB, BasicBlock *PredBB,
                               const TargetData *TD,
                               const DominatorTree &DT) const {
  // See if PHI translation succeeds.
  V = GetPHITranslatedValue(V, CurBB, PredBB, TD);
  if (V == 0) return 0;

  // Make sure the value is live in the predecessor.
  if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
    if (!DT.dominates(Inst->getParent(), PredBB))
      return 0;
  return V;
}


/// InsertPHITranslatedPointer - Insert a computation of the PHI translated
/// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
/// block.  All newly created instructions are added to the NewInsts list.
///
Value *MemoryDependenceAnalysis::
InsertPHITranslatedPointer(Value *InVal, BasicBlock *CurBB,
                           BasicBlock *PredBB, const TargetData *TD,
                           const DominatorTree &DT,
                           SmallVectorImpl<Instruction*> &NewInsts) const {
  // See if we have a version of this value already available and dominating
  // PredBB.  If so, there is no need to insert a new copy.
  if (Value *Res = GetAvailablePHITranslatedValue(InVal, CurBB, PredBB, TD,
                                                  DT))
    return Res;

  // If we don't have an available version of this value, it must be an
  // instruction.
  Instruction *Inst = cast<Instruction>(InVal);

  // Handle bitcast of PHI translatable value.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
    Value *OpVal = InsertPHITranslatedPointer(BC->getOperand(0),
                                              CurBB, PredBB, TD, DT, NewInsts);
    if (OpVal == 0) return 0;

    // Otherwise insert a bitcast at the end of PredBB.
    BitCastInst *New = new BitCastInst(OpVal, InVal->getType(),
                                       InVal->getName()+".phi.trans.insert",
                                       PredBB->getTerminator());
    NewInsts.push_back(New);
    return New;
  }

  // Handle getelementptr with at least one PHI operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
    SmallVector<Value*, 8> GEPOps;
    BasicBlock *CurBB = GEP->getParent();
    for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
      Value *OpVal = InsertPHITranslatedPointer(GEP->getOperand(i),
                                                CurBB, PredBB, TD, DT,
                                                NewInsts);
      if (OpVal == 0) return 0;
      GEPOps.push_back(OpVal);
    }

    GetElementPtrInst *Result =
      GetElementPtrInst::Create(GEPOps[0], GEPOps.begin()+1, GEPOps.end(),
                                InVal->getName()+".phi.trans.insert",
                                PredBB->getTerminator());
    Result->setIsInBounds(GEP->isInBounds());
    NewInsts.push_back(Result);
    return Result;
  }

#if 0
  // FIXME: This code works, but it is unclear that we actually want to insert
  // a big chain of computation in order to make a value available in a block.
  // This needs to be evaluated carefully to consider its cost trade offs.

  // Handle add with a constant RHS.
  if (Inst->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Inst->getOperand(1))) {
    // PHI translate the LHS.
    Value *OpVal = InsertPHITranslatedPointer(Inst->getOperand(0),
                                              CurBB, PredBB, TD, DT, NewInsts);
    if (OpVal == 0) return 0;

    BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1),
                                           InVal->getName()+".phi.trans.insert",
                                                    PredBB->getTerminator());
    Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap());
    Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap());
    NewInsts.push_back(Res);
    return Res;
  }
#endif

  return 0;
}

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited
/// in 'Visited'.
///
/// This has special behavior for the first block query (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be
/// treated as a clobber dependence on the first instruction in the
/// predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepEntry> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {

  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer, isLoad);

  std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo =
    &NonLocalPointerDeps[CacheKey];
  NonLocalDepInfo *Cache = &CacheInfo->second;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return
    // the cached results and populate the visited set.  However, we have to
    // verify that we don't already have conflicting results for these
    // blocks.  Check to ensure that if a block in the results set is in the
    // visited set that it was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->first);
        if (VI == Visited.end() || VI->second == Pointer) continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->first, Pointer));
      if (!I->second.isNonLocal())
        Result.push_back(*I);
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, this is either a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info.  If the cache is empty, the result will be
  // valid cache info; otherwise it won't be.
  if (Cache->empty())
    CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->first = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand
  // (we don't insert every element into its sorted position).  We know that
  // we won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  We should already
      // have been here, so just verify.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Pointer, PointeeSize, isLoad,
                                                 BB, Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        Result.push_back(NonLocalDepEntry(BB, Dep));
        continue;
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to
    // do phi translation to change it into a value live in the predecessor
    // block.  If phi translation fails, then we can't continue dependence
    // analysis.
    Instruction *PtrInst = dyn_cast<Instruction>(Pointer);
    bool NeedsPHITranslation = PtrInst && PtrInst->getParent() == BB;

    // If no PHI translation is needed, just add all the predecessors of this
    // block to scan them as well.
    if (!NeedsPHITranslation) {
      SkipFirstBlock = false;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          Worklist.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer)
          goto PredTranslationFailure;
      }
      continue;
    }

    // If we do need to do phi translation, then there are a bunch of
    // different cases, because we have to find a Value* live in the
    // predecessor block.  We know that PtrInst is defined in this block at
    // least.

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains
    // sorted.  Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }

    // If this is a computation derived from a PHI node, use the suitably
    // translated incoming values for each pred as the phi translated version.
    if (!isPHITranslatable(PtrInst))
      goto PredTranslationFailure;

    Cache = 0;

    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;
      // Get the PHI translated pointer in this predecessor.  This can fail
      // and return null if not translatable.
      Value *PredPtr = GetPHITranslatedValue(PtrInst, BB, Pred, TD);

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtr));

      if (!InsertRes.second) {
        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtr)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.
        goto PredTranslationFailure;
      }

      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would
      // insert a computation of the pointer in this predecessor.
      if (PredPtr == 0) {
        Result.push_back(NonLocalDepEntry(Pred,
                              MemDepResult::getClobber(Pred->getTerminator())));
        continue;
      }

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If we have a problem phi translating, fall through to the code below
      // to handle the failure condition.
      if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred,
                                      Result, Visited))
        goto PredTranslationFailure;
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->second;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fast path "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->first = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->second;
      NumSortedEntries = Cache->size();
    }
    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fast path "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->first = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as being clobbered by the first
    // instruction in this block.
    //
    // If this is the magic first block, return this as a clobber of the
    // whole incoming value.  Since we can't phi translate to one of the
    // predecessors, we have to bail out.
    if (SkipFirstBlock)
      return true;

    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->first != BB)
        continue;

      assert(I->second.isNonLocal() &&
             "Should only be here with transparent block");
      I->second = MemDepResult::getClobber(BB->begin());
      ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey);
      Result.push_back(*I);
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It =
    NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.second;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].second.getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].first);

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}


/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with ptr.  This can make Ptr available
/// in more places than the cached info would otherwise indicate.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!isa<PointerType>(Ptr->getType())) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
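// Illustrative usage sketch (not part of this file): after proving two
// pointers equivalent and rewriting uses of the other value to P, a client
// would call
//
//   MD.invalidateCachedPointerInfo(P);
//
// so that cached results computed before the replacement don't pessimize
// later queries involving P.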
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->second.getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a
  // pointer base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (isa<PointerType>(RemInst->getType())) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're
  // removing.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it.  If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->second.getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->second = NewDirtyVal;

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->second.getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->second = NewDirtyVal;

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }


  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
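// Illustrative usage sketch (not part of this file): a client deleting an
// instruction must notify memdep before erasing it, e.g.:
//
//   MD.removeInstruction(DeadInst);  // keep the caches coherent
//   DeadInst->eraseFromParent();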
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D &&
           "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator
       I = NonLocalPointerDeps.begin(),
       E = NonLocalPointerDeps.end(); I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.second;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
         II != E; ++II)
      assert(II->second.getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->second.getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         E = I->second.end(); II != E; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}