MemoryDependenceAnalysis.cpp revision cfbb634225007b2eddfbfcbf2adff2291b9c03bd
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
                                                "Memory Dependence Analysis",
                                                false, true);

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
  AU.addRequiredTransitive<TargetData>();
}

/// getCallSiteDependency - Private helper for finding the local dependencies
/// of a call site.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getCallSiteDependency(CallSite C, BasicBlock::iterator ScanIt,
                      BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    Value *Pointer = 0;
    uint64_t PointerSize = 0;
    if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
      Pointer = S->getPointerOperand();
      PointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
      Pointer = V->getOperand(0);
      PointerSize = TD.getTypeStoreSize(V->getType());
    } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) {
      Pointer = F->getPointerOperand();

      // FreeInsts erase the entire structure.
      PointerSize = ~0UL;
    } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
      if (AA.getModRefBehavior(CallSite::get(Inst)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;
      return DepResultTy(Inst, Normal);
    } else {
      // Non-memory instruction.
      continue;
    }

    if (AA.getModRefInfo(C, Pointer, PointerSize) != AliasAnalysis::NoModRef)
      return DepResultTy(Inst, Normal);
  }

  // No dependence found.
  return DepResultTy(0, NonLocal);
}

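// Illustrative sketch (not part of the original file): for a block such as
//
//   store i32 0, i32* %p
//   call void @foo(i32* %p)     ; <-- queried call site
//
// the backwards scan above reports the store as the call's local dependence,
// assuming alias analysis cannot prove @foo leaves *%p untouched, because
// AA.getModRefInfo on the call site and %p is then not NoModRef.  A preceding
// call known to not access memory at all is skipped entirely.
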
/// getDependencyFromInternal - Scan the basic block backwards from ScanIt,
/// returning the instruction in BB on which the memory operation QueryInst
/// depends, or a NonLocal marker if the scan reaches the beginning of the
/// block without finding one.  This helper only evaluates dependencies
/// within BB.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getDependencyFromInternal(Instruction *QueryInst, BasicBlock::iterator ScanIt,
                          BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Get the pointer value for which dependence will be determined.
  Value *MemPtr = 0;
  uint64_t MemSize = 0;
  bool MemVolatile = false;

  if (StoreInst *S = dyn_cast<StoreInst>(QueryInst)) {
    MemPtr = S->getPointerOperand();
    MemSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    MemVolatile = S->isVolatile();
  } else if (LoadInst *L = dyn_cast<LoadInst>(QueryInst)) {
    MemPtr = L->getPointerOperand();
    MemSize = TD.getTypeStoreSize(L->getType());
    MemVolatile = L->isVolatile();
  } else if (VAArgInst *V = dyn_cast<VAArgInst>(QueryInst)) {
    MemPtr = V->getOperand(0);
    MemSize = TD.getTypeStoreSize(V->getType());
  } else if (FreeInst *F = dyn_cast<FreeInst>(QueryInst)) {
    MemPtr = F->getPointerOperand();
    // FreeInsts erase the entire structure, not just a field.
    MemSize = ~0UL;
  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst))
    return getCallSiteDependency(CallSite::get(QueryInst), ScanIt, BB);
  else  // Non-memory instructions depend on nothing.
    return DepResultTy(0, None);

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If the access is volatile and this is a volatile load/store, return a
    // dependence.
    if (MemVolatile &&
        ((isa<LoadInst>(Inst) && cast<LoadInst>(Inst)->isVolatile()) ||
         (isa<StoreInst>(Inst) && cast<StoreInst>(Inst)->isVolatile())))
      return DepResultTy(Inst, Normal);

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
      Value *Pointer = L->getPointerOperand();
      uint64_t PointerSize = TD.getTypeStoreSize(L->getType());

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R =
        AA.alias(Pointer, PointerSize, MemPtr, MemSize);

      if (R == AliasAnalysis::NoAlias)
        continue;

      // A load doesn't depend on another load unless the pointers are must
      // aliased; a may-alias load imposes no ordering.
      if (isa<LoadInst>(QueryInst) && R == AliasAnalysis::MayAlias)
        continue;
      return DepResultTy(Inst, Normal);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return None.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
      Value *AccessPtr = MemPtr->getUnderlyingObject();

      if (AccessPtr == AI ||
          AA.alias(AI, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
        return DepResultTy(0, None);
      continue;
    }

    // See if this instruction mod/refs the pointer.
    AliasAnalysis::ModRefResult MRR = AA.getModRefInfo(Inst, MemPtr, MemSize);

    if (MRR == AliasAnalysis::NoModRef)
      continue;

    // Loads don't depend on read-only instructions.
    if (isa<LoadInst>(QueryInst) && MRR == AliasAnalysis::Ref)
      continue;

    // Otherwise, there is a dependence.
    return DepResultTy(Inst, Normal);
  }

  // If we found nothing, return the non-local flag.
  return DepResultTy(0, NonLocal);
}

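// Illustrative sketch (not part of the original file): the scan above
// classifies the nearest relevant instruction.  For IR like
//
//   %a = alloca i32
//   store i32 1, i32* %a
//   %v = load i32* %a
//
// scanning backwards from %v hits the store first; it mods the queried
// location, so the load gets a Normal dependence on it.  Without the store,
// the scan reaches the alloca, whose underlying object must-aliases the
// load's pointer, so the result is None and the load could fold to undef.
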
/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  DepResultTy &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on DepResultTy's default constructing to 'dirty'.
  if (LocalCache.getInt() != Dirty)
    return ConvToResult(LocalCache);

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getPointer())
    ScanPos = Inst;

  // Do the scan.
  LocalCache = getDependencyFromInternal(QueryInst, ScanPos,
                                         QueryInst->getParent());

  // Remember the result!
  if (Instruction *I = LocalCache.getPointer())
    ReverseLocalDeps[I].insert(QueryInst);

  return ConvToResult(LocalCache);
}

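// Illustrative sketch (hypothetical client, not part of this file): a
// transform pass that requires this analysis might query a load like so:
//
//   MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//   MemDepResult Dep = MD.getDependency(TheLoad);    // TheLoad: hypothetical
//   if (Instruction *DepInst = Dep.getInst()) {
//     // TheLoad depends on DepInst within its own block, e.g. a must-aliased
//     // store whose value could be forwarded to the load.
//   }
//
// Repeated queries hit the LocalDeps cache; a Dirty entry with a remembered
// position only rescans the tail of the block below that position.
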
/// getNonLocalDependency - Perform a full dependency query for the
/// specified instruction, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "nonlocal" dependency
/// within its own block.
///
void MemoryDependenceAnalysis::
getNonLocalDependency(Instruction *QueryInst,
                      SmallVectorImpl<std::pair<BasicBlock*,
                                                MemDepResult> > &Result) {
  assert(getDependency(QueryInst).isNonLocal() &&
     "getNonLocalDependency should only be used on insts with non-local deps!");
  DenseMap<BasicBlock*, DepResultTy> &Cache = NonLocalDeps[QueryInst];

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.
  /// In the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    // FIXME: In the "nothing needs to be updated" case, this scan is
    // expensive; why not keep a per-cache flag saying the whole cache is
    // undirty?
    for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
         E = Cache.end(); I != E; ++I)
      if (I->second.getInt() == Dirty)
        DirtyBlocks.push_back(I->first);

    NumCacheNonLocal++;

    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryInst->getParent();
    DirtyBlocks.append(pred_begin(QueryBB), pred_end(QueryBB));
    NumUncacheNonLocal++;
  }

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Get the entry for this block.  Note that this relies on DepResultTy
    // default initializing to Dirty.
    DepResultTy &DirtyBBEntry = Cache[DirtyBB];

    // If DirtyBBEntry isn't dirty, it ended up on the worklist multiple times.
    if (DirtyBBEntry.getInt() != Dirty) continue;

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (Instruction *Inst = DirtyBBEntry.getPointer())
      ScanPos = Inst;

    // Find out if this block has a local dependency for QueryInst.
    DirtyBBEntry = getDependencyFromInternal(QueryInst, ScanPos, DirtyBB);

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember it!
    if (DirtyBBEntry.getInt() != NonLocal) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = DirtyBBEntry.getPointer())
        ReverseNonLocalDeps[Inst].insert(QueryInst);
      continue;
    }

    // If the block *is* completely transparent to the load, we need to check
    // the predecessors of this block.  Add them to our worklist.
    DirtyBlocks.append(pred_begin(DirtyBB), pred_end(DirtyBB));
  }

  // Copy the result into the output set.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
       E = Cache.end(); I != E; ++I)
    Result.push_back(std::make_pair(I->first, ConvToResult(I->second)));
}

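// Illustrative sketch (hypothetical client, not part of this file): once the
// local query answers NonLocal, callers collect per-block results like this:
//
//   SmallVector<std::pair<BasicBlock*, MemDepResult>, 16> Deps;
//   MD.getNonLocalDependency(TheLoad, Deps);   // TheLoad: hypothetical
//   for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
//     // Deps[i].first is a visited block; Deps[i].second is either the
//     // dependence found inside that block, or NonLocal if the block is
//     // completely transparent to the queried location.
//   }
//
// The worklist walks predecessors until every path reaches a block that
// mods/refs the location (or runs out of predecessors).
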
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator DI =
       NonLocalDeps[RemInst].begin(), DE = NonLocalDeps[RemInst].end();
       DI != DE; ++DI)
    if (Instruction *Inst = DI->second.getPointer())
      ReverseNonLocalDeps[Inst].erase(RemInst);

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getPointer()) {
      SmallPtrSet<Instruction*, 4> &RLD = ReverseLocalDeps[Inst];
      RLD.erase(RemInst);
      if (RLD.empty())
        ReverseLocalDeps.erase(Inst);
    }

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    // Anything that was locally dependent on RemInst is now going to be
    // dependent on the instruction after RemInst.  It will have the dirty
    // flag set so it will rescan.  This saves having to scan the entire
    // block to get to this point.
    Instruction *NewDepInst = next(BasicBlock::iterator(RemInst));

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;

      // If we thought the instruction depended on itself (possible for
      // unconfirmed dependencies) ignore the update.
      if (InstDependingOnRemInst == RemInst) continue;

      LocalDeps[InstDependingOnRemInst] = DepResultTy(NewDepInst, Dirty);

      // Make sure to remember that new things depend on NewDepInst.
      ReverseDepsToAdd.push_back(std::make_pair(NewDepInst,
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I)
      for (DenseMap<BasicBlock*, DepResultTy>::iterator
           DI = NonLocalDeps[*I].begin(), DE = NonLocalDeps[*I].end();
           DI != DE; ++DI)
        if (DI->second.getPointer() == RemInst) {
          // Convert to a dirty entry for the subsequent instruction.
          DI->second.setInt(Dirty);
          if (RemInst->isTerminator())
            DI->second.setPointer(0);
          else {
            Instruction *NextI = next(BasicBlock::iterator(RemInst));
            DI->second.setPointer(NextI);
            assert(NextI != RemInst);
            ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
          }
        }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  NonLocalDeps.erase(RemInst);
  getAnalysis<AliasAnalysis>().deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}

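// Illustrative sketch (hypothetical client, not part of this file): a pass
// that deletes an instruction must tell memdep first so the caches stay
// coherent:
//
//   MD.removeInstruction(DeadStore);  // DeadStore: hypothetical instruction
//   DeadStore->eraseFromParent();     // now safe to delete the IR
//
// Everything that depended on DeadStore is re-pointed at the instruction
// after it with the Dirty flag set, so the next query rescans only the tail
// of the block rather than starting from scratch.
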
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getPointer() != D &&
           "Inst occurs in data structures");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (DenseMap<BasicBlock*, DepResultTy>::iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(II->second.getPointer() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
}

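// Illustrative sketch (hypothetical client, not part of this file): a pass
// obtains this analysis through the standard pass-manager plumbing:
//
//   // hypothetical pass
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemoryDependenceAnalysis>();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//     // ... query getDependency / getNonLocalDependency, and call
//     // removeInstruction before erasing anything ...
//     return Changed;
//   }
//
// In DEBUG builds, removeInstruction calls verifyRemoved above to assert the
// erased instruction no longer appears in any of the four internal maps.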