MemoryDependenceAnalysis.cpp revision 73ec3cdd7140aee6d2b9ac32bc2298254ff48c97
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
                                                "Memory Dependence Analysis",
                                                false, true);

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
  AU.addRequiredTransitive<TargetData>();
}

/// getCallSiteDependency - Private helper for finding the local dependencies
/// of a call site.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getCallSiteDependency(CallSite C, BasicBlock::iterator ScanIt,
                      BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    Value *Pointer = 0;
    uint64_t PointerSize = 0;
    if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
      Pointer = S->getPointerOperand();
      PointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    } else if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
      Pointer = AI;
      if (ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize()))
        // Use ABI size (size between elements), not store size (size of one
        // element without padding).
        PointerSize = C->getZExtValue() *
                      TD.getABITypeSize(AI->getAllocatedType());
      else
        PointerSize = ~0UL;
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
      Pointer = V->getOperand(0);
      PointerSize = TD.getTypeStoreSize(V->getType());
    } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) {
      Pointer = F->getPointerOperand();

      // FreeInsts erase the entire structure
      PointerSize = ~0UL;
    } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
      if (AA.getModRefBehavior(CallSite::get(Inst)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;
      return DepResultTy(Inst, Normal);
    } else
      continue;

    if (AA.getModRefInfo(C, Pointer, PointerSize) != AliasAnalysis::NoModRef)
      return DepResultTy(Inst, Normal);
  }

  // No dependence found.
  return DepResultTy(0, NonLocal);
}
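
// Example (illustrative): given IR such as
//     store i32 0, i32* %p
//     call void @f(i32* %p)
// a query on the call scans backwards, reaches the store, and reports it as
// a Normal dependence unless alias analysis can prove the call neither reads
// nor writes the stored location (NoModRef).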

/// getDependencyFromInternal - Scan the block BB backwards starting at ScanIt,
/// returning the instruction on which the memory operation QueryInst depends,
/// or a NonLocal marker if nothing in this block affects it.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getDependencyFromInternal(Instruction *QueryInst, BasicBlock::iterator ScanIt,
                          BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Get the pointer value for which dependence will be determined
  Value *MemPtr = 0;
  uint64_t MemSize = 0;
  bool MemVolatile = false;

  if (StoreInst *S = dyn_cast<StoreInst>(QueryInst)) {
    MemPtr = S->getPointerOperand();
    MemSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    MemVolatile = S->isVolatile();
  } else if (LoadInst *L = dyn_cast<LoadInst>(QueryInst)) {
    MemPtr = L->getPointerOperand();
    MemSize = TD.getTypeStoreSize(L->getType());
    MemVolatile = L->isVolatile();
  } else if (VAArgInst *V = dyn_cast<VAArgInst>(QueryInst)) {
    MemPtr = V->getOperand(0);
    MemSize = TD.getTypeStoreSize(V->getType());
  } else if (FreeInst *F = dyn_cast<FreeInst>(QueryInst)) {
    MemPtr = F->getPointerOperand();
    // FreeInsts erase the entire structure, not just a field.
    MemSize = ~0UL;
  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst))
    return getCallSiteDependency(CallSite::get(QueryInst), ScanIt, BB);
  else  // Non-memory instructions depend on nothing.
    return DepResultTy(0, None);

  // Walk backwards through the basic block, looking for dependencies
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If the access is volatile and this is a volatile load/store, return a
    // dependence.
    if (MemVolatile &&
        ((isa<LoadInst>(Inst) && cast<LoadInst>(Inst)->isVolatile()) ||
         (isa<StoreInst>(Inst) && cast<StoreInst>(Inst)->isVolatile())))
      return DepResultTy(Inst, Normal);

    // MemDep is broken w.r.t. loads: it says that two loads of the same
    // pointer depend on each other.  :(
    if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
      Value *Pointer = L->getPointerOperand();
      uint64_t PointerSize = TD.getTypeStoreSize(L->getType());

      // If we found a pointer, check if it could be the same as our pointer
      AliasAnalysis::AliasResult R =
        AA.alias(Pointer, PointerSize, MemPtr, MemSize);

      if (R == AliasAnalysis::NoAlias)
        continue;

      // A load doesn't depend on another load that only may-aliases it.
      if (isa<LoadInst>(QueryInst) && R == AliasAnalysis::MayAlias)
        continue;
      return DepResultTy(Inst, Normal);
    }

    // FIXME: This claims that an access depends on the allocation.  This may
    // make sense, but is dubious at best.  It would be better to fix GVN to
    // handle a 'None' Query.
    if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
      Value *Pointer = AI;
      uint64_t PointerSize;
      if (ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize()))
        // Use ABI size (size between elements), not store size (size of one
        // element without padding).
        PointerSize = C->getZExtValue() *
                      TD.getABITypeSize(AI->getAllocatedType());
      else
        PointerSize = ~0UL;

      AliasAnalysis::AliasResult R =
        AA.alias(Pointer, PointerSize, MemPtr, MemSize);

      if (R == AliasAnalysis::NoAlias)
        continue;
      return DepResultTy(Inst, Normal);
    }

    // See if this instruction mod/ref's the pointer.
    AliasAnalysis::ModRefResult MRR = AA.getModRefInfo(Inst, MemPtr, MemSize);

    if (MRR == AliasAnalysis::NoModRef)
      continue;

    // Loads don't depend on read-only instructions.
    if (isa<LoadInst>(QueryInst) && MRR == AliasAnalysis::Ref)
      continue;

    // Otherwise, there is a dependence.
    return DepResultTy(Inst, Normal);
  }

  // If we found nothing, return the non-local flag.
  return DepResultTy(0, NonLocal);
}
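
// Note on load queries (illustrative): the scan above skips earlier loads
// that only MayAlias the queried pointer and skips read-only (Ref) calls,
// but it still reports a MustAlias load as a Normal dependence; that is the
// "broken w.r.t. loads" behavior called out in the comment inside the loop.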

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  DepResultTy &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on DepResultTy's default constructing to 'dirty'.
  if (LocalCache.getInt() != Dirty)
    return ConvToResult(LocalCache);

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getPointer())
    ScanPos = Inst;

  // Do the scan.
  LocalCache = getDependencyFromInternal(QueryInst, ScanPos,
                                         QueryInst->getParent());

  // Remember the result!
  if (Instruction *I = LocalCache.getPointer())
    ReverseLocalDeps[I].insert(QueryInst);

  return ConvToResult(LocalCache);
}
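
// Usage sketch (hypothetical client code; assumes the usual pass machinery
// and that MemDepResult exposes the dependent instruction via getInst()):
//
//   MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//   MemDepResult Dep = MD.getDependency(Load);
//   if (Dep.isNonLocal()) {
//     // Any dependence lies outside Load's block; see getNonLocalDependency.
//   } else if (Instruction *DepInst = Dep.getInst()) {
//     // Load depends on DepInst within its own basic block.
//   }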

/// getNonLocalDependency - Perform a full dependency query for the
/// specified instruction, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "nonlocal" dependency
/// within its own block.
///
void MemoryDependenceAnalysis::
getNonLocalDependency(Instruction *QueryInst,
                      SmallVectorImpl<std::pair<BasicBlock*,
                                                MemDepResult> > &Result) {
  assert(getDependency(QueryInst).isNonLocal() &&
     "getNonLocalDependency should only be used on insts with non-local deps!");
  DenseMap<BasicBlock*, DepResultTy> &Cache = NonLocalDeps[QueryInst];

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.
  /// In the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    // FIXME: In the "don't need to be updated" case, this is expensive, why
    // not have a per-"cache" flag saying it is undirty?
    for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
         E = Cache.end(); I != E; ++I)
      if (I->second.getInt() == Dirty)
        DirtyBlocks.push_back(I->first);

    NumCacheNonLocal++;

    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryInst->getParent();
    DirtyBlocks.append(pred_begin(QueryBB), pred_end(QueryBB));
    NumUncacheNonLocal++;
  }

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Get the entry for this block.  Note that this relies on DepResultTy
    // default initializing to Dirty.
    DepResultTy &DirtyBBEntry = Cache[DirtyBB];

    // If DirtyBBEntry isn't dirty, it ended up on the worklist multiple times.
    if (DirtyBBEntry.getInt() != Dirty) continue;

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (Instruction *Inst = DirtyBBEntry.getPointer())
      ScanPos = Inst;

    // Find out if this block has a local dependency for QueryInst.
    DirtyBBEntry = getDependencyFromInternal(QueryInst, ScanPos, DirtyBB);

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember it!
    if (DirtyBBEntry.getInt() != NonLocal) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = DirtyBBEntry.getPointer())
        ReverseNonLocalDeps[Inst].insert(QueryInst);
      continue;
    }

    // If the block *is* completely transparent to the load, we need to check
    // the predecessors of this block.  Add them to our worklist.
    DirtyBlocks.append(pred_begin(DirtyBB), pred_end(DirtyBB));
  }

  // Copy the result into the output set.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
       E = Cache.end(); I != E; ++I)
    Result.push_back(std::make_pair(I->first, ConvToResult(I->second)));
}
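
// Usage sketch (hypothetical caller): once getDependency reports a non-local
// result, the per-block results can be gathered and inspected like this:
//
//   SmallVector<std::pair<BasicBlock*, MemDepResult>, 16> Deps;
//   if (MD.getDependency(Load).isNonLocal()) {
//     MD.getNonLocalDependency(Load, Deps);
//     for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
//       // Deps[i].second describes the dependence at the end of block
//       // Deps[i].first; a NonLocal entry means that block is completely
//       // transparent to the access.
//     }
//   }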

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator DI =
       NonLocalDeps[RemInst].begin(), DE = NonLocalDeps[RemInst].end();
       DI != DE; ++DI)
    if (Instruction *Inst = DI->second.getPointer())
      ReverseNonLocalDeps[Inst].erase(RemInst);

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getPointer()) {
      SmallPtrSet<Instruction*, 4> &RLD = ReverseLocalDeps[Inst];
      RLD.erase(RemInst);
      if (RLD.empty())
        ReverseLocalDeps.erase(Inst);
    }

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // Loop over all of the things that depend on the instruction we're removing.
  //
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    // Anything that was locally dependent on RemInst is now going to be
    // dependent on the instruction after RemInst.  It will have the dirty flag
    // set so it will rescan.  This saves having to scan the entire block to
    // get to this point.
    Instruction *NewDepInst = next(BasicBlock::iterator(RemInst));

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;

      // If we thought the instruction depended on itself (possible for
      // unconfirmed dependencies) ignore the update.
      if (InstDependingOnRemInst == RemInst) continue;

      LocalDeps[InstDependingOnRemInst] = DepResultTy(NewDepInst, Dirty);

      // Make sure to remember that new things depend on NewDepInst.
      ReverseDepsToAdd.push_back(std::make_pair(NewDepInst,
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I)
      for (DenseMap<BasicBlock*, DepResultTy>::iterator
           DI = NonLocalDeps[*I].begin(), DE = NonLocalDeps[*I].end();
           DI != DE; ++DI)
        if (DI->second.getPointer() == RemInst) {
          // Convert to a dirty entry for the subsequent instruction.
          DI->second.setInt(Dirty);
          if (RemInst->isTerminator())
            DI->second.setPointer(0);
          else {
            Instruction *NextI = next(BasicBlock::iterator(RemInst));
            DI->second.setPointer(NextI);
            assert(NextI != RemInst);
            ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
          }
        }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  NonLocalDeps.erase(RemInst);
  getAnalysis<AliasAnalysis>().deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
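
// Coherency note (illustrative): a transformation that deletes an instruction
// is expected to notify this analysis before erasing it, e.g.
//
//   MD.removeInstruction(DeadInst);
//   DeadInst->eraseFromParent();
//
// Cached entries that pointed at DeadInst are marked Dirty and repointed at
// the next instruction, so later queries rescan only the tail of the block.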

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getPointer() != D &&
           "Inst occurs in data structures");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (DenseMap<BasicBlock*, DepResultTy>::iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(II->second.getPointer() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
}