MemoryDependenceAnalysis.cpp revision b414142036012dd9432c4e8c5fef09d4d49fcc22
//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation  --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
// FIXME: Figure out what a sane value is for this.
//        (500 is relatively insane.)
static const int BlockScanLimit = 500;

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID), PredCache(0) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}



/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  if (PredCache == 0)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
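///
/// Illustrative summary of the cases below: an unordered load fills in Loc
/// and is reported as Ref, an unordered store as Mod; an instruction with no
/// specific location (e.g. a fence) leaves Loc.Ptr null and is classified
/// only by mayReadFromMemory()/mayWriteToMemory().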
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::Ref;
    } else if (LI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(LI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::Mod;
    } else if (SI->getOrdering() == Monotonic) {
      Loc = AA->getLocation(SI);
      return AliasAnalysis::ModRef;
    }
    Loc = AliasAnalysis::Location();
    return AliasAnalysis::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst)) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        break;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is non-func-local, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI,
                                       const TargetData *TD) {
  // If we have no target data, we can't do this.
  if (TD == 0) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (MemLocBase == 0)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, *TD);

  unsigned Size = MemoryDependenceAnalysis::
    getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
                                    LI, *TD);
  return Size != 0;
}

/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load.  If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use.  If not, this returns zero.
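///
/// Worked example (illustrative, assuming i16/i32 are legal integer types for
/// the target): for a 4-byte-aligned "load i8* %P" (LIOffs == 0) and a
/// one-byte query at P+3 (MemLocOffs == 3, MemLocSize == 1), the candidate
/// width grows 2 -> 4 and the function returns 4, since an i32 load at P
/// covers bytes [0,4).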
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                unsigned MemLocSize, const LoadInst *LI,
                                const TargetData &TD) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, TD);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return 0;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return 0;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !TD.fitsInLegalInteger(NewLoadByteSize*8))
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }

  return 0;
}

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.
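///
/// Illustrative outcomes for a load query: an earlier must-aliased load or
/// store is returned as a Def, a may-aliased store as a Clobber, and
/// no-alias accesses are simply scanned past.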
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB) {

  const Value *MemLocBase = 0;
  int64_t MemLocOffset = 0;

  unsigned Limit = BlockScanLimit;

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // Debug intrinsics don't (and can't) cause dependences.
      if (isa<DbgInfoIntrinsic>(II)) continue;

      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Atomic loads have complications involved.
      // FIXME: This is overly conservative.
      if (!LI->isUnordered())
        return MemDepResult::getClobber(LI);

      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, TD))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Loads that merely may-alias this pointer don't impose a dependence;
        // keep scanning.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered())
        return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    if (isa<AllocaInst>(Inst) ||
        (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    switch (AA->getModRefInfo(Inst, MemLoc)) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
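      // FALL THROUGH: a store query cannot safely skip an instruction that
      // reads the location, so treat it as a clobber below.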
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, the
  // dependence is non-func-local, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// getDependency - Return the instruction on which a memory operation
/// depends.
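///
/// A minimal (hypothetical) client sketch:
///   MemDepResult Res = MD.getDependency(Inst);
///   if (Res.isDef() || Res.isClobber())
///     Instruction *Dep = Res.getInst();  // the local dependency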
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, the
    // dependence is non-func-local, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for every block the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
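///
/// Illustrative (hypothetical) use: copy the returned NonLocalDepInfo if it
/// must outlive the next query, then inspect each NonLocalDepEntry's
/// getBB()/getResult() pair.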
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, the
      // dependence is non-func-local, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
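/// A minimal caller sketch (hypothetical; LI is a LoadInst, AA and MD the
/// relevant analyses):
///   SmallVector<NonLocalDepResult, 8> Deps;
///   MD.getNonLocalPointerDependency(AA.getLocation(LI), /*isLoad=*/true,
///                                   LI->getParent(), Deps);
///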
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
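///
/// Illustrative behavior: one new entry is binary-search inserted into place,
/// two new entries are inserted one after the other (case 2 falls through to
/// case 1), and three or more trigger a full std::sort.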
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
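///
/// (Illustrative) On failure the caller records MemDepResult::getUnknown()
/// for the query, as getNonLocalPointerDependency does above.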
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {

  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // tbaa tag are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.TBAATag = Loc.TBAATag;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's TBAATag is inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->TBAATag != Loc.TBAATag) {
      if (CacheInfo->TBAATag) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->TBAATag = 0;
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.TBAATag)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set, it
    // was visited for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (!I->getResult().isNonLocal())
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into it
  // than its valid cache info.  If empty, the result will be valid cache info,
  // otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
        continue;
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock*, 16> NewBlocks;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation.  If we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = 0;

    PredList.clear();
    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, 0);

      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < PredList.size(); i++)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0; i < PredList.size(); i++) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (PredPtrVal == 0)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          getNonLocalPointerDepFromBB(PredPointer,
                                      Loc.getWithNewPtr(PredPtrVal),
                                      isLoad, Pred,
                                      Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This forces later reuse of the
        // cached value to do more work, but it won't miss the phi translation
        // failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return true;

    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->getBB() != BB)
        continue;

      assert(I->getResult().isNonLocal() &&
             "Should only be here with transparent block");
      I->setResult(MemDepResult::getUnknown());
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
                                         Pointer.getAddr()));
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It =
    NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}


/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with Ptr.  This can make Ptr available
/// in more places than the cached info reflects.
1243void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
1244  // If Ptr isn't really a pointer, just ignore it.
1245  if (!Ptr->getType()->isPointerTy()) return;
1246  // Flush store info for the pointer.
1247  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1248  // Flush load info for the pointer.
1249  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1250}
1251
1252/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
1253/// This needs to be done when the CFG changes, e.g., due to splitting
1254/// critical edges.
1255void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
1256  PredCache->clear();
1257}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them from both the load info and the store info.  If the instruction has
  // non-pointer type, it can't be a pointer base, so it can't be in either
  // map.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));

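// An illustrative example, not part of this pass, of why the dirty entry
// points at the instruction *after* RemInst: a dirty entry only records
// where the next scan may safely resume, and everything from that point
// back to the real dependency is unaffected by the removal.
//
//   %a = load i32* %p            ; cached answer for some query: RemInst
//   store i32 0, i32* %p         ; RemInst, about to be removed
//   %b = load i32* %p            ; NewDirtyVal points here, so the next
//                                ; query rescans upward from %b instead of
//                                ; from the bottom of the block.
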
  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
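
// An illustrative sketch, not part of this pass, of the deferred-insert
// pattern used above: inserting into a DenseMap can trigger a rehash,
// which would invalidate the 'ReverseDeps' reference held during the
// scan, so updates are queued and replayed afterwards.  Names here are
// hypothetical:
//
//   SmallVector<std::pair<Instruction*, Instruction*>, 8> ToAdd;
//   SmallPtrSet<Instruction*, 4> &Deps = ReverseMap[Key];  // Live reference.
//   for (SmallPtrSet<Instruction*, 4>::iterator I = Deps.begin(),
//        E = Deps.end(); I != E; ++I)
//     ToAdd.push_back(std::make_pair(NewDep, *I));  // Don't touch the map.
//   while (!ToAdd.empty()) {                        // Reference now dead:
//     ReverseMap[ToAdd.back().first].insert(ToAdd.back().second);
//     ToAdd.pop_back();
//   }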

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
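
// An illustrative note, not part of this pass: PerInstNLInfo pairs the
// cached per-block results (INLD.first) with a dirty flag (INLD.second).
// Setting the flag above is what lets the next non-local query for *I
// re-scan only the blocks holding dirty entries instead of recomputing
// the whole cache.  A consumer-side sketch, with the re-scan elided and
// isDirty() standing in for whatever internal predicate the cache uses:
//
//   PerInstNLInfo &INLD = NonLocalDeps[QueryCall];
//   if (INLD.second) {                        // Some entries are dirty.
//     for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
//          DE = INLD.first.end(); DI != DE; ++DI)
//       if (DI->getResult().isDirty())
//         DI->setResult(/* re-scan DI->getBB() from the recorded point */);
//     INLD.second = false;                    // Cache is clean again.
//   }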

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>, 8>
      ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Replacing the entry for RemInst with a
      // dirty entry for the instruction after it may have perturbed the sort
      // order.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }
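
// An illustrative sketch, not part of this pass, of why the std::sort
// above matters: NonLocalDepInfo is kept ordered by NonLocalDepEntry's
// operator< so that cached per-block results can be found by binary
// search.  A lookup relying on that invariant plausibly looks like:
//
//   NonLocalDepInfo::iterator Entry =
//     std::upper_bound(Cache.begin(), Cache.end(), NonLocalDepEntry(BB));
//   if (Entry != Cache.begin() && (--Entry)->getBB() == BB)
//     return Entry->getResult();   // Hit: reuse the cached result for BB.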

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
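
// An illustrative client-side sketch, not part of this pass: the dirty
// entries built above use ++BasicBlock::iterator(RemInst), so
// removeInstruction must run while the instruction is still in its block,
// i.e. strictly before the actual deletion:
//
//   MD.removeInstruction(DeadInst);  // Fix up memdep's caches first...
//   DeadInst->eraseFromParent();     // ...only then delete the instruction.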

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D && "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator
       I = NonLocalPointerDeps.begin(),
       E = NonLocalPointerDeps.end(); I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), EE = Val.end();
         II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}
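
// A note on usage, not part of this pass: verifyRemoved walks every cache
// in full, so it is only reached through the DEBUG() call in
// removeInstruction, i.e. in assertion-enabled builds running under
// -debug or -debug-only=memdep.  For example (hypothetical input file):
//
//   $ opt -basicaa -gvn -debug-only=memdep -disable-output test.ll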