MemoryDependenceAnalysis.cpp revision 237a8287454389a5b940e18c1efb2201fc443208
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
                                                "Memory Dependence Analysis",
                                                false, true);

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
  AU.addRequiredTransitive<TargetData>();
}

/// getCallSiteDependency - Private helper for finding the local dependencies
/// of a call site.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getCallSiteDependency(CallSite C, BasicBlock::iterator ScanIt,
                      BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    Value *Pointer = 0;
    uint64_t PointerSize = 0;
    if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
      Pointer = S->getPointerOperand();
      PointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    } else if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
      Pointer = AI;
      if (ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize()))
        // Use ABI size (size between elements), not store size (size of one
        // element without padding).
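        // (For example, on x86 an 'x86_fp80' long double has a store size of
        // 10 bytes but an ABI size of 12 or 16 bytes, which includes the
        // alignment padding between consecutive array elements.)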
        PointerSize = C->getZExtValue() *
                      TD.getABITypeSize(AI->getAllocatedType());
      else
        PointerSize = ~0UL;
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
      Pointer = V->getOperand(0);
      PointerSize = TD.getTypeStoreSize(V->getType());
    } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) {
      Pointer = F->getPointerOperand();

      // FreeInsts erase the entire structure.
      PointerSize = ~0UL;
    } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
      if (AA.getModRefBehavior(CallSite::get(Inst)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;
      return DepResultTy(Inst, Normal);
    } else
      continue;

    if (AA.getModRefInfo(C, Pointer, PointerSize) != AliasAnalysis::NoModRef)
      return DepResultTy(Inst, Normal);
  }

  // No dependence found.
  return DepResultTy(0, NonLocal);
}

/// getDependencyFromInternal - Return the instruction on which the memory
/// operation QueryInst depends, scanning backwards from ScanIt through the
/// instructions of basic block BB.
MemoryDependenceAnalysis::DepResultTy MemoryDependenceAnalysis::
getDependencyFromInternal(Instruction *QueryInst, BasicBlock::iterator ScanIt,
                          BasicBlock *BB) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  TargetData &TD = getAnalysis<TargetData>();

  // Get the pointer value for which dependence will be determined.
  Value *MemPtr = 0;
  uint64_t MemSize = 0;
  bool MemVolatile = false;

  if (StoreInst *S = dyn_cast<StoreInst>(QueryInst)) {
    MemPtr = S->getPointerOperand();
    MemSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
    MemVolatile = S->isVolatile();
  } else if (LoadInst *L = dyn_cast<LoadInst>(QueryInst)) {
    MemPtr = L->getPointerOperand();
    MemSize = TD.getTypeStoreSize(L->getType());
    MemVolatile = L->isVolatile();
  } else if (VAArgInst *V = dyn_cast<VAArgInst>(QueryInst)) {
    MemPtr = V->getOperand(0);
    MemSize = TD.getTypeStoreSize(V->getType());
  } else if (FreeInst *F = dyn_cast<FreeInst>(QueryInst)) {
    MemPtr = F->getPointerOperand();
    // FreeInsts erase the entire structure, not just a field.
    MemSize = ~0UL;
  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst))
    return getCallSiteDependency(CallSite::get(QueryInst), ScanIt, BB);
  else  // Non-memory instructions depend on nothing.
    return DepResultTy(0, None);

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If the access is volatile and this is a volatile load/store, return a
    // dependence.
    if (MemVolatile &&
        ((isa<LoadInst>(Inst) && cast<LoadInst>(Inst)->isVolatile()) ||
         (isa<StoreInst>(Inst) && cast<StoreInst>(Inst)->isVolatile())))
      return DepResultTy(Inst, Normal);

    // MemDep is broken w.r.t. loads: it says that two loads of the same
    // pointer depend on each other. :(
    if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
      Value *Pointer = L->getPointerOperand();
      uint64_t PointerSize = TD.getTypeStoreSize(L->getType());

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R =
        AA.alias(Pointer, PointerSize, MemPtr, MemSize);

      if (R == AliasAnalysis::NoAlias)
        continue;

      // For a load query, a prior load that merely may-aliases it is not a
      // dependence; only a must-aliased load is.
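      // If the query itself writes memory (e.g. it is a store), however, even
      // a may-aliased prior load is reported as a dependence below: the write
      // cannot be moved above a read of possibly the same memory.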
      if (isa<LoadInst>(QueryInst) && R == AliasAnalysis::MayAlias)
        continue;
      return DepResultTy(Inst, Normal);
    }

    // If this is an allocation, and if we know that the accessed pointer
    // points to the allocation, return None.  This means that there is no
    // dependence and the access can be optimized based on that.  For example,
    // a load could turn into undef.
    if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
      Value *AccessPtr = MemPtr->getUnderlyingObject();

      if (AccessPtr == AI ||
          AA.alias(AI, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
        return DepResultTy(0, None);
      continue;
    }

    // See if this instruction mod/refs the pointer.
    AliasAnalysis::ModRefResult MRR = AA.getModRefInfo(Inst, MemPtr, MemSize);

    if (MRR == AliasAnalysis::NoModRef)
      continue;

    // Loads don't depend on read-only instructions.
    if (isa<LoadInst>(QueryInst) && MRR == AliasAnalysis::Ref)
      continue;

    // Otherwise, there is a dependence.
    return DepResultTy(Inst, Normal);
  }

  // If we found nothing, return the non-local flag.
  return DepResultTy(0, NonLocal);
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  DepResultTy &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on DepResultTy's default constructing to 'dirty'.
  if (LocalCache.getInt() != Dirty)
    return ConvToResult(LocalCache);

  // Otherwise, if we have a dirty entry, we know we can start the scan at
  // that instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getPointer())
    ScanPos = Inst;

  // Do the scan.
  LocalCache = getDependencyFromInternal(QueryInst, ScanPos,
                                         QueryInst->getParent());

  // Remember the result!
  if (Instruction *I = LocalCache.getPointer())
    ReverseLocalDeps[I].insert(QueryInst);

  return ConvToResult(LocalCache);
}

/// getNonLocalDependency - Perform a full dependency query for the
/// specified instruction, returning the set of blocks that the value is
/// potentially live across.  The returned set of results includes a
/// "NonLocal" entry for every block that the value is live across.
///
/// This method assumes the instruction returns a "nonlocal" dependency
/// within its own block.
///
void MemoryDependenceAnalysis::
getNonLocalDependency(Instruction *QueryInst,
                      SmallVectorImpl<std::pair<BasicBlock*,
                                                MemDepResult> > &Result) {
  assert(getDependency(QueryInst).isNonLocal() &&
     "getNonLocalDependency should only be used on insts with non-local deps!");
  DenseMap<BasicBlock*, DepResultTy> &Cache = NonLocalDeps[QueryInst];

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc.
  /// In the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    // FIXME: In the "nothing needs to be updated" case, this is expensive;
    // why not have a per-cache flag saying the cache is undirty?
    for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
         E = Cache.end(); I != E; ++I)
      if (I->second.getInt() == Dirty)
        DirtyBlocks.push_back(I->first);

    NumCacheNonLocal++;

    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryInst->getParent();
    DirtyBlocks.append(pred_begin(QueryBB), pred_end(QueryBB));
    NumUncacheNonLocal++;
  }

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Get the entry for this block.  Note that this relies on DepResultTy
    // default initializing to Dirty.
    DepResultTy &DirtyBBEntry = Cache[DirtyBB];

    // If DirtyBBEntry isn't dirty, it ended up on the worklist multiple times.
    if (DirtyBBEntry.getInt() != Dirty) continue;

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (Instruction *Inst = DirtyBBEntry.getPointer())
      ScanPos = Inst;

    // Find out if this block has a local dependency for QueryInst.
    DirtyBBEntry = getDependencyFromInternal(QueryInst, ScanPos, DirtyBB);

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember it!
    if (DirtyBBEntry.getInt() != NonLocal) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = DirtyBBEntry.getPointer())
        ReverseNonLocalDeps[Inst].insert(QueryInst);
      continue;
    }

    // If the block *is* completely transparent to the load, we need to check
    // the predecessors of this block.  Add them to our worklist.
    DirtyBlocks.append(pred_begin(DirtyBB), pred_end(DirtyBB));
  }

  // Copy the result into the output set.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator I = Cache.begin(),
       E = Cache.end(); I != E; ++I)
    Result.push_back(std::make_pair(I->first, ConvToResult(I->second)));
}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  for (DenseMap<BasicBlock*, DepResultTy>::iterator DI =
         NonLocalDeps[RemInst].begin(), DE = NonLocalDeps[RemInst].end();
       DI != DE; ++DI)
    if (Instruction *Inst = DI->second.getPointer())
      ReverseNonLocalDeps[Inst].erase(RemInst);

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getPointer()) {
      SmallPtrSet<Instruction*, 4> &RLD = ReverseLocalDeps[Inst];
      RLD.erase(RemInst);
      if (RLD.empty())
        ReverseLocalDeps.erase(Inst);
    }

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // Loop over all of the things that depend on the instruction we're
  // removing.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    // Anything that was locally dependent on RemInst is now going to be
    // dependent on the instruction after RemInst.  It will have the dirty
    // flag set so it will rescan.  This saves having to scan the entire
    // block to get to this point.
    Instruction *NewDepInst = next(BasicBlock::iterator(RemInst));

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;

      // If we thought the instruction depended on itself (possible for
      // unconfirmed dependencies) ignore the update.
      if (InstDependingOnRemInst == RemInst) continue;

      LocalDeps[InstDependingOnRemInst] = DepResultTy(NewDepInst, Dirty);

      // Make sure to remember that new things depend on NewDepInst.
      ReverseDepsToAdd.push_back(std::make_pair(NewDepInst,
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I)
      for (DenseMap<BasicBlock*, DepResultTy>::iterator
             DI = NonLocalDeps[*I].begin(), DE = NonLocalDeps[*I].end();
           DI != DE; ++DI)
        if (DI->second.getPointer() == RemInst) {
          // Convert to a dirty entry for the subsequent instruction.
          DI->second.setInt(Dirty);
          if (RemInst->isTerminator())
            DI->second.setPointer(0);
          else {
            Instruction *NextI = next(BasicBlock::iterator(RemInst));
            DI->second.setPointer(NextI);
            assert(NextI != RemInst);
            ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
          }
        }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  NonLocalDeps.erase(RemInst);
  getAnalysis<AliasAnalysis>().deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getPointer() != D &&
           "Inst occurs in data structures");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    // Note: I->second is const through a const_iterator, so the inner loop
    // must use a const_iterator as well.
    for (DenseMap<BasicBlock*, DepResultTy>::const_iterator
           II = I->second.begin(), EE = I->second.end(); II != EE; ++II)
      assert(II->second.getPointer() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end(); I != E; ++I)
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
}
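
//===----------------------------------------------------------------------===//
// Illustrative usage sketch (a hypothetical client; 'MyClientPass' is not a
// real pass).  This assumes MemDepResult exposes getInst() alongside the
// isNonLocal() used in the assert above; treat it as a sketch, not as the
// canonical client API.
//
//   void MyClientPass::processLoad(LoadInst *L) {
//     MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//
//     MemDepResult Dep = MD.getDependency(L);
//     if (Instruction *DepInst = Dep.getInst()) {
//       // DepInst is the nearest preceding instruction in L's block that
//       // may write (or, for volatile accesses, read) the queried memory.
//     } else if (Dep.isNonLocal()) {
//       // No dependency within L's own block: collect one result per
//       // predecessor block instead.
//       SmallVector<std::pair<BasicBlock*, MemDepResult>, 16> NLDeps;
//       MD.getNonLocalDependency(L, NLDeps);
//     }
//   }
//
// A client that deletes an instruction must call removeInstruction() first so
// the local, non-local, and reverse maps stay coherent:
//
//   MD.removeInstruction(I);
//   I->eraseFromParent();
//===----------------------------------------------------------------------===//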