MemoryDependenceAnalysis.cpp revision a2f55dd388e1fb33b553a5862bca0fe4bd4b781e
1//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation  --*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements an analysis that determines, for a given memory
11// operation, what preceding memory operations it depends on.  It builds on
12// alias analysis information, and tries to provide a lazy, caching interface to
13// a common kind of alias information query.
14//
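// A typical client usage sketch (assuming 'MD' is this pass's result, obtained
// via getAnalysis<MemoryDependenceAnalysis>(), and 'Inst' is a memory-accessing
// instruction):
//
//   MemDepResult Res = MD.getDependency(Inst);
//   if (Res.isDef())           // Res.getInst() exactly defines the location.
//     ...
//   else if (Res.isClobber())  // Res.getInst() may write the queried memory.
//     ...
//   else                       // Non-local: see the getNonLocal* queries below.
//     ...
//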
15//===----------------------------------------------------------------------===//
16
17#define DEBUG_TYPE "memdep"
18#include "llvm/Analysis/MemoryDependenceAnalysis.h"
19#include "llvm/Constants.h"
20#include "llvm/Instructions.h"
21#include "llvm/IntrinsicInst.h"
22#include "llvm/Function.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/Support/PredIteratorCache.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Target/TargetData.h"
29using namespace llvm;
30
31STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
32STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
33STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
34
35STATISTIC(NumCacheNonLocalPtr,
36          "Number of fully cached non-local ptr responses");
37STATISTIC(NumCacheDirtyNonLocalPtr,
38          "Number of cached, but dirty, non-local ptr responses");
39STATISTIC(NumUncacheNonLocalPtr,
40          "Number of uncached non-local ptr responses");
41STATISTIC(NumCacheCompleteNonLocalPtr,
42          "Number of block queries that were completely cached");
43
44char MemoryDependenceAnalysis::ID = 0;
45
46// Register this pass...
47static RegisterPass<MemoryDependenceAnalysis> X("memdep",
48                                     "Memory Dependence Analysis", false, true);
49
50MemoryDependenceAnalysis::MemoryDependenceAnalysis()
51: FunctionPass(&ID), PredCache(0) {
52}
53MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
54}
55
56/// Clean up memory in between runs
57void MemoryDependenceAnalysis::releaseMemory() {
58  LocalDeps.clear();
59  NonLocalDeps.clear();
60  NonLocalPointerDeps.clear();
61  ReverseLocalDeps.clear();
62  ReverseNonLocalDeps.clear();
63  ReverseNonLocalPtrDeps.clear();
64  PredCache->clear();
65}
66
67
68
69/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
70///
71void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
72  AU.setPreservesAll();
73  AU.addRequiredTransitive<AliasAnalysis>();
74  AU.addRequiredTransitive<TargetData>();
75}
76
77bool MemoryDependenceAnalysis::runOnFunction(Function &) {
78  AA = &getAnalysis<AliasAnalysis>();
79  TD = &getAnalysis<TargetData>();
80  if (PredCache == 0)
81    PredCache.reset(new PredIteratorCache());
82  return false;
83}
84
85/// RemoveFromReverseMap - This is a helper function that removes Val from
86/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
87template <typename KeyTy>
88static void RemoveFromReverseMap(DenseMap<Instruction*,
89                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
90                                 Instruction *Inst, KeyTy Val) {
91  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
92  InstIt = ReverseMap.find(Inst);
93  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
94  bool Found = InstIt->second.erase(Val);
95  assert(Found && "Invalid reverse map!"); (void)Found; // Quiet unused warning in NDEBUG builds.
96  if (InstIt->second.empty())
97    ReverseMap.erase(InstIt);
98}
99
100
101/// getCallSiteDependencyFrom - Private helper for finding the local
102/// dependencies of a call site.
103MemDepResult MemoryDependenceAnalysis::
104getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
105                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
106  // Walk backwards through the block, looking for dependencies.
107  while (ScanIt != BB->begin()) {
108    Instruction *Inst = --ScanIt;
109
110    // If this inst is a memory op, get the pointer it accessed
111    Value *Pointer = 0;
112    uint64_t PointerSize = 0;
113    if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
114      Pointer = S->getPointerOperand();
115      PointerSize = TD->getTypeStoreSize(S->getOperand(0)->getType());
116    } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
117      Pointer = V->getOperand(0);
118      PointerSize = TD->getTypeStoreSize(V->getType());
119    } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) {
120      Pointer = F->getPointerOperand();
121
122      // FreeInsts erase the entire structure
123      PointerSize = ~0ULL;
124    } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
125      // Debug intrinsics don't cause dependences.
126      if (isa<DbgInfoIntrinsic>(Inst)) continue;
127      CallSite InstCS = CallSite::get(Inst);
128      // If these two calls do not interfere, look past it.
129      switch (AA->getModRefInfo(CS, InstCS)) {
130      case AliasAnalysis::NoModRef:
131        // If the two calls don't interact (e.g. InstCS is readnone) keep
132        // scanning.
133        continue;
134      case AliasAnalysis::Ref:
135        // If the two calls read the same memory locations and CS is a readonly
136        // function, then we have two cases: 1) the calls may not interfere with
137        // each other at all.  2) the calls may produce the same value.  In case
138        // #1 we want to keep scanning; in case #2, we want to return Inst
139        // as a Def dependence.  This allows us to CSE in cases like:
140        //   X = strlen(P);
141        //    memchr(...);
142        //   Y = strlen(P);  // Y = X
143        if (isReadOnlyCall) {
144          if (CS.getCalledFunction() != 0 &&
145              CS.getCalledFunction() == InstCS.getCalledFunction())
146            return MemDepResult::getDef(Inst);
147          // Ignore unrelated read/read call dependences.
148          continue;
149        }
150        // FALL THROUGH
151      default:
152        return MemDepResult::getClobber(Inst);
153      }
154    } else {
155      // Non-memory instruction.
156      continue;
157    }
158
159    if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
160      return MemDepResult::getClobber(Inst);
161  }
162
163  // No dependence found.  If this is the entry block of the function, it is a
164  // clobber, otherwise it is non-local.
165  if (BB != &BB->getParent()->getEntryBlock())
166    return MemDepResult::getNonLocal();
167  return MemDepResult::getClobber(ScanIt);
168}
169
170/// getPointerDependencyFrom - Return the instruction on which a memory
171/// location depends.  If isLoad is true, this routine ignores may-aliases
172/// with read-only operations.
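///
/// For example (an illustrative sketch), when querying the location %p for the
/// load in:
///   store i32 1, i32* %p
///   %v = load i32* %p
/// the must-aliased store is returned as a Def; a store that only may-aliases
/// %p would be returned as a Clobber instead.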
173MemDepResult MemoryDependenceAnalysis::
174getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
175                         BasicBlock::iterator ScanIt, BasicBlock *BB) {
176
177  // Walk backwards through the basic block, looking for dependencies.
178  while (ScanIt != BB->begin()) {
179    Instruction *Inst = --ScanIt;
180
181    // Debug intrinsics don't cause dependences.
182    if (isa<DbgInfoIntrinsic>(Inst)) continue;
183
184    // Values depend on loads if the pointers are must-aliased.  This means
185    // that a load depends on another must-aliased load from the same pointer.
186    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
187      Value *Pointer = LI->getPointerOperand();
188      uint64_t PointerSize = TD->getTypeStoreSize(LI->getType());
189
190      // If we found a pointer, check if it could be the same as our pointer.
191      AliasAnalysis::AliasResult R =
192        AA->alias(Pointer, PointerSize, MemPtr, MemSize);
193      if (R == AliasAnalysis::NoAlias)
194        continue;
195
196      // Two loads never conflict; skip may-aliased loads for load queries.
197      if (isLoad && R == AliasAnalysis::MayAlias)
198        continue;
199      // Stores depend on may- and must-aliased loads; loads depend only on
200      // must-aliased loads.
201      return MemDepResult::getDef(Inst);
202    }
203
204    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
205      // If alias analysis can tell that this store is guaranteed to not modify
206      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
207      // the query pointer points to constant memory etc.
208      if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
209        continue;
210
211      // Ok, this store might clobber the query pointer.  Check to see if it is
212      // a must alias: in this case, we want to return this as a def.
213      Value *Pointer = SI->getPointerOperand();
214      uint64_t PointerSize = TD->getTypeStoreSize(SI->getOperand(0)->getType());
215
216      // If we found a pointer, check if it could be the same as our pointer.
217      AliasAnalysis::AliasResult R =
218        AA->alias(Pointer, PointerSize, MemPtr, MemSize);
219
220      if (R == AliasAnalysis::NoAlias)
221        continue;
222      if (R == AliasAnalysis::MayAlias)
223        return MemDepResult::getClobber(Inst);
224      return MemDepResult::getDef(Inst);
225    }
226
227    // If this is an allocation, and if we know that the accessed pointer is to
228    // the allocation, return Def.  This means that there is no dependence and
229    // the access can be optimized based on that.  For example, a load could
230    // turn into undef.
231    if (AllocationInst *AI = dyn_cast<AllocationInst>(Inst)) {
232      Value *AccessPtr = MemPtr->getUnderlyingObject();
233
234      if (AccessPtr == AI ||
235          AA->alias(AI, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
236        return MemDepResult::getDef(AI);
237      continue;
238    }
239
240    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
241    switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
242    case AliasAnalysis::NoModRef:
243      // If the call has no effect on the queried pointer, just ignore it.
244      continue;
245    case AliasAnalysis::Ref:
246      // If the call is known to never store to the pointer, and if this is a
247      // load query, we can safely ignore it (scan past it).
248      if (isLoad)
249        continue;
250      // FALL THROUGH.
251    default:
252      // Otherwise, there is a potential dependence.  Return a clobber.
253      return MemDepResult::getClobber(Inst);
254    }
255  }
256
257  // No dependence found.  If this is the entry block of the function, it is a
258  // clobber, otherwise it is non-local.
259  if (BB != &BB->getParent()->getEntryBlock())
260    return MemDepResult::getNonLocal();
261  return MemDepResult::getClobber(ScanIt);
262}
263
264/// getDependency - Return the instruction on which a memory operation
265/// depends.
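///
/// A typical local query (a sketch; 'MD' is this analysis, 'LI' a LoadInst):
///   MemDepResult Dep = MD.getDependency(LI);
///   if (Dep.isDef() && isa<StoreInst>(Dep.getInst()))
///     ... // LI reloads the value that the store just wrote.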
266MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
267  Instruction *ScanPos = QueryInst;
268
269  // Check for a cached result
270  MemDepResult &LocalCache = LocalDeps[QueryInst];
271
272  // If the cached entry is non-dirty, just return it.  Note that this depends
273  // on MemDepResult's default constructing to 'dirty'.
274  if (!LocalCache.isDirty())
275    return LocalCache;
276
277  // Otherwise, if we have a dirty entry, we know we can start the scan at that
278  // instruction, which may save us some work.
279  if (Instruction *Inst = LocalCache.getInst()) {
280    ScanPos = Inst;
281
282    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
283  }
284
285  BasicBlock *QueryParent = QueryInst->getParent();
286
287  Value *MemPtr = 0;
288  uint64_t MemSize = 0;
289
290  // Do the scan.
291  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
292    // No dependence found.  If this is the entry block of the function, it is a
293    // clobber, otherwise it is non-local.
294    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
295      LocalCache = MemDepResult::getNonLocal();
296    else
297      LocalCache = MemDepResult::getClobber(QueryInst);
298  } else if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) {
299    // If this is a volatile store, don't mess around with it.  Just return the
300    // previous instruction as a clobber.
301    if (SI->isVolatile())
302      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
303    else {
304      MemPtr = SI->getPointerOperand();
305      MemSize = TD->getTypeStoreSize(SI->getOperand(0)->getType());
306    }
307  } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
308    // If this is a volatile load, don't mess around with it.  Just return the
309    // previous instruction as a clobber.
310    if (LI->isVolatile())
311      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
312    else {
313      MemPtr = LI->getPointerOperand();
314      MemSize = TD->getTypeStoreSize(LI->getType());
315    }
316  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
317    CallSite QueryCS = CallSite::get(QueryInst);
318    bool isReadOnly = AA->onlyReadsMemory(QueryCS);
319    LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
320                                           QueryParent);
321  } else if (FreeInst *FI = dyn_cast<FreeInst>(QueryInst)) {
322    MemPtr = FI->getPointerOperand();
323    // FreeInsts erase the entire structure, not just a field.
324    MemSize = ~0ULL;  // 64-bit all-ones; ~0UL would truncate on 32-bit hosts.
325  } else {
326    // Non-memory instruction.
327    LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
328  }
329
330  // If we need to do a pointer scan, make it happen.
331  if (MemPtr)
332    LocalCache = getPointerDependencyFrom(MemPtr, MemSize,
333                                          isa<LoadInst>(QueryInst),
334                                          ScanPos, QueryParent);
335
336  // Remember the result!
337  if (Instruction *I = LocalCache.getInst())
338    ReverseLocalDeps[I].insert(QueryInst);
339
340  return LocalCache;
341}
342
343#ifndef NDEBUG
344/// AssertSorted - This method is used when -debug is specified to verify that
345/// cache arrays are properly kept sorted.
346static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
347                         int Count = -1) {
348  if (Count == -1) Count = Cache.size();
349  if (Count == 0) return;
350
351  for (unsigned i = 1; i != unsigned(Count); ++i)
352    assert(Cache[i-1] <= Cache[i] && "Cache isn't sorted!");
353}
354#endif
355
356/// getNonLocalCallDependency - Perform a full dependency query for the
357/// specified call, returning the set of blocks that the value is
358/// potentially live across.  The returned set of results will include a
359/// "NonLocal" result for all blocks where the value is live across.
360///
361/// This method assumes the instruction returns a "NonLocal" dependency
362/// within its own block.
363///
364/// This returns a reference to an internal data structure that may be
365/// invalidated on the next non-local query or when an instruction is
366/// removed.  Clients must copy this data if they want it around longer than
367/// that.
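///
/// Example traversal of the result (a sketch; 'MD' is this analysis and
/// 'QueryCS' is a call whose local dependency is NonLocal):
///   const MemoryDependenceAnalysis::NonLocalDepInfo &Deps =
///     MD.getNonLocalCallDependency(QueryCS);
///   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
///     if (Instruction *D = Deps[i].second.getInst())
///       ... // In block Deps[i].first, the call depends on D.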
368const MemoryDependenceAnalysis::NonLocalDepInfo &
369MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
370  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
371 "getNonLocalCallDependency should only be used on calls with non-local deps!");
372  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
373  NonLocalDepInfo &Cache = CacheP.first;
374
375  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
376  /// the cached case, this can happen due to instructions being deleted etc. In
377  /// the uncached case, this starts out as the set of predecessors we care
378  /// about.
379  SmallVector<BasicBlock*, 32> DirtyBlocks;
380
381  if (!Cache.empty()) {
382    // Okay, we have a cache entry.  If we know it is not dirty, just return it
383    // with no computation.
384    if (!CacheP.second) {
385      NumCacheNonLocal++;
386      return Cache;
387    }
388
389    // If we already have a partially computed set of results, scan them to
390    // determine what is dirty, seeding our initial DirtyBlocks worklist.
391    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
392       I != E; ++I)
393      if (I->second.isDirty())
394        DirtyBlocks.push_back(I->first);
395
396    // Sort the cache so that we can do fast binary search lookups below.
397    std::sort(Cache.begin(), Cache.end());
398
399    ++NumCacheDirtyNonLocal;
400    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
401    //     << Cache.size() << " cached: " << *QueryInst;
402  } else {
403    // Seed DirtyBlocks with each of the preds of QueryInst's block.
404    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
405    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
406      DirtyBlocks.push_back(*PI);
407    NumUncacheNonLocal++;
408  }
409
410  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
411  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);
412
413  SmallPtrSet<BasicBlock*, 64> Visited;
414
415  unsigned NumSortedEntries = Cache.size();
416  DEBUG(AssertSorted(Cache));
417
418  // Iterate while we still have blocks to update.
419  while (!DirtyBlocks.empty()) {
420    BasicBlock *DirtyBB = DirtyBlocks.back();
421    DirtyBlocks.pop_back();
422
423    // Already processed this block?
424    if (!Visited.insert(DirtyBB))
425      continue;
426
427    // Do a binary search to see if we already have an entry for this block in
428    // the cache set.  If so, find it.
429    DEBUG(AssertSorted(Cache, NumSortedEntries));
430    NonLocalDepInfo::iterator Entry =
431      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
432                       std::make_pair(DirtyBB, MemDepResult()));
433    if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
434      --Entry;
435
436    MemDepResult *ExistingResult = 0;
437    if (Entry != Cache.begin()+NumSortedEntries &&
438        Entry->first == DirtyBB) {
439      // If we already have an entry, and if it isn't already dirty, the block
440      // is done.
441      if (!Entry->second.isDirty())
442        continue;
443
444      // Otherwise, remember this slot so we can update the value.
445      ExistingResult = &Entry->second;
446    }
447
448    // If the dirty entry has a pointer, start scanning from it so we don't have
449    // to rescan the entire block.
450    BasicBlock::iterator ScanPos = DirtyBB->end();
451    if (ExistingResult) {
452      if (Instruction *Inst = ExistingResult->getInst()) {
453        ScanPos = Inst;
454        // We're removing QueryInst's use of Inst.
455        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
456                             QueryCS.getInstruction());
457      }
458    }
459
460    // Find out if this block has a local dependency for QueryInst.
461    MemDepResult Dep;
462
463    if (ScanPos != DirtyBB->begin()) {
464      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
465    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
466      // No dependence found.  If this is the entry block of the function, it is
467      // a clobber, otherwise it is non-local.
468      Dep = MemDepResult::getNonLocal();
469    } else {
470      Dep = MemDepResult::getClobber(ScanPos);
471    }
472
473    // If we had a dirty entry for the block, update it.  Otherwise, just add
474    // a new entry.
475    if (ExistingResult)
476      *ExistingResult = Dep;
477    else
478      Cache.push_back(std::make_pair(DirtyBB, Dep));
479
480    // If the block has a dependency (i.e. it isn't completely transparent to
481    // the value), remember the association!
482    if (!Dep.isNonLocal()) {
483      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
484      // update this when we remove instructions.
485      if (Instruction *Inst = Dep.getInst())
486        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
487    } else {
488
489      // If the block *is* completely transparent to the load, we need to check
490      // the predecessors of this block.  Add them to our worklist.
491      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
492        DirtyBlocks.push_back(*PI);
493    }
494  }
495
496  return Cache;
497}
498
499/// getNonLocalPointerDependency - Perform a full dependency query for an
500/// access to the specified (non-volatile) memory location, returning the
501/// set of instructions that either define or clobber the value.
502///
503/// This method assumes the pointer has a "NonLocal" dependency within its
504/// own block.
505///
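/// Example usage (a sketch; 'MD' is this analysis and 'LI' is a load whose
/// local query returned NonLocal):
///   SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 16> Deps;
///   MD.getNonLocalPointerDependency(LI->getPointerOperand(), true /*isLoad*/,
///                                   LI->getParent(), Deps);
///   // Each entry pairs a BasicBlock* with the def/clobber found for it.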
506void MemoryDependenceAnalysis::
507getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
508                             SmallVectorImpl<NonLocalDepEntry> &Result) {
509  assert(isa<PointerType>(Pointer->getType()) &&
510         "Can't get pointer deps of a non-pointer!");
511  Result.clear();
512
513  // We know that the pointer value is live into FromBB; find the def/clobbers
514  // from its predecessors.
515  const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
516  uint64_t PointeeSize = TD->getTypeStoreSize(EltTy);
517
518  // This is the set of blocks we've inspected, and the pointer we consider in
519  // each block.  Because of critical edges, we currently bail out if querying
520  // a block with multiple different pointers.  This can happen during PHI
521  // translation.
522  DenseMap<BasicBlock*, Value*> Visited;
523  if (!getNonLocalPointerDepFromBB(Pointer, PointeeSize, isLoad, FromBB,
524                                   Result, Visited, true))
525    return;
526  Result.clear();
527  Result.push_back(std::make_pair(FromBB,
528                                  MemDepResult::getClobber(FromBB->begin())));
529}
530
531/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
532/// Pointer/PointeeSize using either cached information in Cache or by doing a
533/// lookup (which may use dirty cache info if available).  If we do a lookup,
534/// add the result to the cache.
535MemDepResult MemoryDependenceAnalysis::
536GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
537                        bool isLoad, BasicBlock *BB,
538                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
539
540  // Do a binary search to see if we already have an entry for this block in
541  // the cache set.  If so, find it.
542  NonLocalDepInfo::iterator Entry =
543    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
544                     std::make_pair(BB, MemDepResult()));
545  if (Entry != Cache->begin() && prior(Entry)->first == BB)
546    --Entry;
547
548  MemDepResult *ExistingResult = 0;
549  if (Entry != Cache->begin()+NumSortedEntries && Entry->first == BB)
550    ExistingResult = &Entry->second;
551
552  // If we have a cached entry, and it is non-dirty, use it as the value for
553  // this dependency.
554  if (ExistingResult && !ExistingResult->isDirty()) {
555    ++NumCacheNonLocalPtr;
556    return *ExistingResult;
557  }
558
559  // Otherwise, we have to scan for the value.  If we have a dirty cache
560  // entry, start scanning from its position, otherwise we scan from the end
561  // of the block.
562  BasicBlock::iterator ScanPos = BB->end();
563  if (ExistingResult && ExistingResult->getInst()) {
564    assert(ExistingResult->getInst()->getParent() == BB &&
565           "Instruction invalidated?");
566    ++NumCacheDirtyNonLocalPtr;
567    ScanPos = ExistingResult->getInst();
568
569    // Eliminating the dirty entry from 'Cache', so update the reverse info.
570    ValueIsLoadPair CacheKey(Pointer, isLoad);
571    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
572  } else {
573    ++NumUncacheNonLocalPtr;
574  }
575
576  // Scan the block for the dependency.
577  MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad,
578                                              ScanPos, BB);
579
580  // If we had a dirty entry for the block, update it.  Otherwise, just add
581  // a new entry.
582  if (ExistingResult)
583    *ExistingResult = Dep;
584  else
585    Cache->push_back(std::make_pair(BB, Dep));
586
587  // If the block has a dependency (i.e. it isn't completely transparent to
588  // the value), remember the reverse association because we just added it
589  // to Cache!
590  if (Dep.isNonLocal())
591    return Dep;
592
593  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
594  // update MemDep when we remove instructions.
595  Instruction *Inst = Dep.getInst();
596  assert(Inst && "Didn't depend on anything?");
597  ValueIsLoadPair CacheKey(Pointer, isLoad);
598  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
599  return Dep;
600}
601
602/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
603/// number of elements in the array that are already properly ordered.  This is
604/// optimized for the case when only a few entries are added.
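/// For example, with NumSortedEntries == 4 and Cache.size() == 5, only the one
/// trailing unsorted entry is binary-inserted rather than re-sorting the whole
/// array.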
605static void
606SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
607                         unsigned NumSortedEntries) {
608  switch (Cache.size() - NumSortedEntries) {
609  case 0:
610    // done, no new entries.
611    break;
612  case 2: {
613    // Two new entries, insert the last one into place.
614    MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
615    Cache.pop_back();
616    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
617      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
618    Cache.insert(Entry, Val);
619    // FALL THROUGH.
620  }
621  case 1:
622    // One new entry, just insert the new value at the appropriate position.
623    if (Cache.size() != 1) {
624      MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
625      Cache.pop_back();
626      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
627        std::upper_bound(Cache.begin(), Cache.end(), Val);
628      Cache.insert(Entry, Val);
629    }
630    break;
631  default:
632    // Added many values, do a full scale sort.
633    std::sort(Cache.begin(), Cache.end());
634    break;
635  }
636}
637
638
639/// getNonLocalPointerDepFromBB - Perform a dependency query based on
640/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
641/// results to the results vector and keep track of which blocks are visited in
642/// 'Visited'.
643///
644/// This has special behavior for the first block queries (when SkipFirstBlock
645/// is true).  In this special case, it ignores the contents of the specified
646/// block and starts returning dependence info for its predecessors.
647///
648/// This function returns false on success, or true to indicate that it could
649/// not compute dependence information for some reason.  This should be treated
650/// as a clobber dependence on the first instruction in the predecessor block.
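///
/// When the query pointer is a PHI defined in the block being exited, it is
/// phi translated into each predecessor.  For example (a sketch), given:
///   %p = phi i32* [ %a, %pred1 ], [ %b, %pred2 ]
/// the dependence of %p is computed as the dependence of %a within %pred1 and
/// of %b within %pred2.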
651bool MemoryDependenceAnalysis::
652getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize,
653                            bool isLoad, BasicBlock *StartBB,
654                            SmallVectorImpl<NonLocalDepEntry> &Result,
655                            DenseMap<BasicBlock*, Value*> &Visited,
656                            bool SkipFirstBlock) {
657
658  // Look up the cached info for Pointer.
659  ValueIsLoadPair CacheKey(Pointer, isLoad);
660
661  std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo =
662    &NonLocalPointerDeps[CacheKey];
663  NonLocalDepInfo *Cache = &CacheInfo->second;
664
665  // If we have valid cached information for exactly the block we are
666  // investigating, just return it with no recomputation.
667  if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
668    // If we have a fully cached result for this query, we can just return the
669    // cached results and populate the visited set.  However, we have to verify
670    // that we don't already have conflicting results for these blocks.  Check
671    // to ensure that if a block in the results set is in the visited set that
672    // it was for the same pointer query.
673    if (!Visited.empty()) {
674      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
675           I != E; ++I) {
676        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->first);
677        if (VI == Visited.end() || VI->second == Pointer) continue;
678
679        // We have a pointer mismatch in a block.  Just return clobber, saying
680        // that something was clobbered in this result.  We could also do a
681        // non-fully cached query, but there is little point in doing this.
682        return true;
683      }
684    }
685
686    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
687         I != E; ++I) {
688      Visited.insert(std::make_pair(I->first, Pointer));
689      if (!I->second.isNonLocal())
690        Result.push_back(*I);
691    }
692    ++NumCacheCompleteNonLocalPtr;
693    return false;
694  }
695
696  // Otherwise, this is either a new block, a block with an invalid cache
697  // pointer, or one that we're about to invalidate by putting more info into
698  // it than its valid cache info covers.  If the cache is empty, the result
699  // will be valid cache info; otherwise it won't be.
700  if (Cache->empty())
701    CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
702  else
703    CacheInfo->first = BBSkipFirstBlockPair();
704
705  SmallVector<BasicBlock*, 32> Worklist;
706  Worklist.push_back(StartBB);
707
708  // Keep track of the entries that we know are sorted.  Previously cached
709  // entries will all be sorted.  Entries we add are only sorted on demand (we
710  // don't insert every element into its sorted position).  We know that we
711  // won't get any reuse from currently inserted values, because we don't
712  // revisit blocks after we insert info for them.
713  unsigned NumSortedEntries = Cache->size();
714  DEBUG(AssertSorted(*Cache));
715
716  while (!Worklist.empty()) {
717    BasicBlock *BB = Worklist.pop_back_val();
718
719    // Skip the first block if we have it.
720    if (!SkipFirstBlock) {
721      // Analyze the dependency of *Pointer in FromBB.  See if we already have
722      // been here.
723      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
724
725      // Get the dependency info for Pointer in BB.  If we have cached
726      // information, we will use it, otherwise we compute it.
727      DEBUG(AssertSorted(*Cache, NumSortedEntries));
728      MemDepResult Dep = GetNonLocalInfoForBlock(Pointer, PointeeSize, isLoad,
729                                                 BB, Cache, NumSortedEntries);
730
731      // If we got a Def or Clobber, add this to the list of results.
732      if (!Dep.isNonLocal()) {
733        Result.push_back(NonLocalDepEntry(BB, Dep));
734        continue;
735      }
736    }
737
738    // If 'Pointer' is an instruction defined in this block, then we need to do
739    // phi translation to change it into a value live in the predecessor block.
740    // If phi translation fails, then we can't continue dependence analysis.
741    Instruction *PtrInst = dyn_cast<Instruction>(Pointer);
742    bool NeedsPHITranslation = PtrInst && PtrInst->getParent() == BB;
743
744    // If no PHI translation is needed, just add all the predecessors of this
745    // block to scan them as well.
746    if (!NeedsPHITranslation) {
747      SkipFirstBlock = false;
748      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
749        // Verify that we haven't looked at this block yet.
750        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
751          InsertRes = Visited.insert(std::make_pair(*PI, Pointer));
752        if (InsertRes.second) {
753          // First time we've looked at *PI.
754          Worklist.push_back(*PI);
755          continue;
756        }
757
758        // If we have seen this block before, but it was with a different
759        // pointer then we have a phi translation failure and we have to treat
760        // this as a clobber.
761        if (InsertRes.first->second != Pointer)
762          goto PredTranslationFailure;
763      }
764      continue;
765    }
766
767    // If we do need to do phi translation, then there are a bunch of different
768    // cases, because we have to find a Value* live in the predecessor block.
769    // We at least know that PtrInst is defined in this block.
770
771    // We may have added values to the cache list before this PHI translation.
772    // If so, we haven't done anything to ensure that the cache remains sorted.
773    // Sort it now (if needed) so that recursive invocations of
774    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
775    // value will only see properly sorted cache arrays.
776    if (Cache && NumSortedEntries != Cache->size()) {
777      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
778      NumSortedEntries = Cache->size();
779    }
780
781    // If this is directly a PHI node, just use the incoming values for each
782    // pred as the phi translated version.
783    if (PHINode *PtrPHI = dyn_cast<PHINode>(PtrInst)) {
784      Cache = 0;
785
786      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
787        BasicBlock *Pred = *PI;
788        Value *PredPtr = PtrPHI->getIncomingValueForBlock(Pred);
789
790        // Check to see if we have already visited this pred block with another
791        // pointer.  If so, we can't do this lookup.  This failure can occur
792        // with PHI translation when a critical edge exists and the PHI node in
793        // the successor translates to a pointer value different than the
794        // pointer the block was first analyzed with.
795        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
796          InsertRes = Visited.insert(std::make_pair(Pred, PredPtr));
797
798        if (!InsertRes.second) {
799          // If the predecessor was visited with PredPtr, then we already did
800          // the analysis and can ignore it.
801          if (InsertRes.first->second == PredPtr)
802            continue;
803
804          // Otherwise, the block was previously analyzed with a different
805          // pointer.  We can't represent the result of this case, so we just
806          // treat this as a phi translation failure.
807          goto PredTranslationFailure;
808        }
809
810        // FIXME: it is entirely possible that PHI translating will end up with
811        // the same value.  Consider PHI translating something like:
812        // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
813        // to recurse here, pedantically speaking.
814
815        // If we have a problem phi translating, fall through to the code below
816        // to handle the failure condition.
817        if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred,
818                                        Result, Visited))
819          goto PredTranslationFailure;
820      }
821
822      // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
823      CacheInfo = &NonLocalPointerDeps[CacheKey];
824      Cache = &CacheInfo->second;
825      NumSortedEntries = Cache->size();
826
827      // Since we did phi translation, the "Cache" set won't contain all of the
828      // results for the query.  This is ok (we can still use it to accelerate
829      // specific block queries) but we can't do the fastpath "return all
830    // results from the set".  Clear out the indicator for this.
831      CacheInfo->first = BBSkipFirstBlockPair();
832      SkipFirstBlock = false;
833      continue;
834    }
835
836    // TODO: BITCAST, GEP.
837
838    //   cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
839    //   if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
840    //     cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
841  PredTranslationFailure:
842
843    if (Cache == 0) {
844      // Refresh the CacheInfo/Cache pointer if it got invalidated.
845      CacheInfo = &NonLocalPointerDeps[CacheKey];
846      Cache = &CacheInfo->second;
847      NumSortedEntries = Cache->size();
848    }
849
850    // Since we did phi translation, the "Cache" set won't contain all of the
851    // results for the query.  This is ok (we can still use it to accelerate
852    // specific block queries) but we can't do the fastpath "return all
853    // results from the set".  Clear out the indicator for this.
854    CacheInfo->first = BBSkipFirstBlockPair();
855
856    // If *nothing* works, mark the pointer as being clobbered by the first
857    // instruction in this block.
858    //
859    // If this is the magic first block, return this as a clobber of the whole
860    // incoming value.  Since we can't phi translate to one of the predecessors,
861    // we have to bail out.
862    if (SkipFirstBlock)
863      return true;
864
865    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
866      assert(I != Cache->rend() && "Didn't find current block??");
867      if (I->first != BB)
868        continue;
869
870      assert(I->second.isNonLocal() &&
871             "Should only be here with transparent block");
872      I->second = MemDepResult::getClobber(BB->begin());
873      ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey);
874      Result.push_back(*I);
875      break;
876    }
877  }
878
879  // Okay, we're done now.  If we added new values to the cache, re-sort it.
880  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
881  DEBUG(AssertSorted(*Cache));
882  return false;
883}
884
885/// RemoveCachedNonLocalPointerDependencies - If P exists in
886/// CachedNonLocalPointerInfo, remove it.
887void MemoryDependenceAnalysis::
888RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
889  CachedNonLocalPointerInfo::iterator It =
890    NonLocalPointerDeps.find(P);
891  if (It == NonLocalPointerDeps.end()) return;
892
893  // Remove all of the entries in the BB->val map.  This involves removing
894  // instructions from the reverse map.
895  NonLocalDepInfo &PInfo = It->second.second;
896
897  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
898    Instruction *Target = PInfo[i].second.getInst();
899    if (Target == 0) continue;  // Ignore non-local dep results.
900    assert(Target->getParent() == PInfo[i].first);
901
902    // Eliminating the dirty entry from 'Cache', so update the reverse info.
903    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
904  }
905
906  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
907  NonLocalPointerDeps.erase(It);
908}
909
910
911/// invalidateCachedPointerInfo - This method is used to invalidate cached
912/// information about the specified pointer, because it may be too
913/// conservative in memdep.  This is an optional call that can be used when
914/// the client detects an equivalence between the pointer and some other
915/// value and replaces the other value with Ptr.  This can make Ptr available
916/// in more places than the cached info currently reflects.
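/// For example (a sketch), after a client proves that some value V equals Ptr
/// and rewrites all uses of V to Ptr, it would call:
///   MD.invalidateCachedPointerInfo(Ptr);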
917void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
918  // If Ptr isn't really a pointer, just ignore it.
919  if (!isa<PointerType>(Ptr->getType())) return;
920  // Flush store info for the pointer.
921  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
922  // Flush load info for the pointer.
923  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
924}
925
926/// removeInstruction - Remove an instruction from the dependence analysis,
927/// updating the dependence of instructions that previously depended on it.
928/// This method attempts to keep the cache coherent using the reverse map.
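///
/// A client deleting an instruction should notify memdep first so that stale
/// cache entries are fixed up, e.g. (a sketch):
///   MD.removeInstruction(DeadStore);
///   DeadStore->eraseFromParent();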
929void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
930  // Walk through the Non-local dependencies, removing this one as the value
931  // for any cached queries.
932  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
933  if (NLDI != NonLocalDeps.end()) {
934    NonLocalDepInfo &BlockMap = NLDI->second.first;
935    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
936         DI != DE; ++DI)
937      if (Instruction *Inst = DI->second.getInst())
938        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
939    NonLocalDeps.erase(NLDI);
940  }
941
942  // If we have a cached local dependence query for this instruction, remove it.
943  //
944  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
945  if (LocalDepEntry != LocalDeps.end()) {
946    // Remove us from DepInst's reverse set now that the local dep info is gone.
947    if (Instruction *Inst = LocalDepEntry->second.getInst())
948      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
949
950    // Remove this local dependency info.
951    LocalDeps.erase(LocalDepEntry);
952  }
953
954  // If we have any cached pointer dependencies on this instruction, remove
955  // them.  If the instruction has non-pointer type, then it can't be a pointer
956  // base.
957
958  // Remove it from both the load info and the store info.  The instruction
959  // can't be in either of these maps if it is non-pointer.
960  if (isa<PointerType>(RemInst->getType())) {
961    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
962    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
963  }
964
965  // Loop over all of the things that depend on the instruction we're removing.
966  //
967  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
968
969  // If we find RemInst as a clobber or Def in any of the maps for other values,
970  // we need to replace its entry with a dirty version of the instruction after
971  // it.  If RemInst is a terminator, we use a null dirty value.
972  //
973  // Using a dirty version of the instruction after RemInst saves having to scan
974  // the entire block to get to this point.
975  MemDepResult NewDirtyVal;
976  if (!RemInst->isTerminator())
977    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
978
979  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
980  if (ReverseDepIt != ReverseLocalDeps.end()) {
981    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
982    // RemInst can't be the terminator if it has local stuff depending on it.
983    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
984           "Nothing can locally depend on a terminator");
985
986    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
987         E = ReverseDeps.end(); I != E; ++I) {
988      Instruction *InstDependingOnRemInst = *I;
989      assert(InstDependingOnRemInst != RemInst &&
990             "Already removed our local dep info");
991
992      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
993
994      // Make sure to remember that new things depend on NewDirtyVal's instruction.
995      assert(NewDirtyVal.getInst() && "There is no way something else can have "
996             "a local dep on this if it is a terminator!");
997      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
998                                                InstDependingOnRemInst));
999    }
1000
1001    ReverseLocalDeps.erase(ReverseDepIt);
1002
1003    // Add new reverse deps after scanning the set, to avoid invalidating the
1004    // 'ReverseDeps' reference.
1005    while (!ReverseDepsToAdd.empty()) {
1006      ReverseLocalDeps[ReverseDepsToAdd.back().first]
1007        .insert(ReverseDepsToAdd.back().second);
1008      ReverseDepsToAdd.pop_back();
1009    }
1010  }
1011
1012  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1013  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
1014    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
1015    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
1016         I != E; ++I) {
1017      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");
1018
1019      PerInstNLInfo &INLD = NonLocalDeps[*I];
1020      // The information is now dirty!
1021      INLD.second = true;
1022
1023      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
1024           DE = INLD.first.end(); DI != DE; ++DI) {
1025        if (DI->second.getInst() != RemInst) continue;
1026
1027        // Convert to a dirty entry for the subsequent instruction.
1028        DI->second = NewDirtyVal;
1029
1030        if (Instruction *NextI = NewDirtyVal.getInst())
1031          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
1032      }
1033    }
1034
1035    ReverseNonLocalDeps.erase(ReverseDepIt);
1036
1037    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
1038    while (!ReverseDepsToAdd.empty()) {
1039      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
1040        .insert(ReverseDepsToAdd.back().second);
1041      ReverseDepsToAdd.pop_back();
1042    }
1043  }
1044
1045  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1046  // value in the NonLocalPointerDeps info.
1047  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1048    ReverseNonLocalPtrDeps.find(RemInst);
1049  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
1050    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
1051    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
1052
1053    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
1054         E = Set.end(); I != E; ++I) {
1055      ValueIsLoadPair P = *I;
1056      assert(P.getPointer() != RemInst &&
1057             "Already removed NonLocalPointerDeps info for RemInst");
1058
1059      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;
1060
1061      // The cache is not valid for any specific block anymore.
1062      NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();
1063
1064      // Update any entries for RemInst to use the instruction after it.
1065      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
1066           DI != DE; ++DI) {
1067        if (DI->second.getInst() != RemInst) continue;
1068
1069        // Convert to a dirty entry for the subsequent instruction.
1070        DI->second = NewDirtyVal;
1071
1072        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1073          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1074      }
1075
1076      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
1077      // subsequent value may invalidate the sortedness.
1078      std::sort(NLPDI.begin(), NLPDI.end());
1079    }
1080
1081    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
1082
1083    while (!ReversePtrDepsToAdd.empty()) {
1084      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
1085        .insert(ReversePtrDepsToAdd.back().second);
1086      ReversePtrDepsToAdd.pop_back();
1087    }
1088  }
1089
1090
1091  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
1092  AA->deleteValue(RemInst);
1093  DEBUG(verifyRemoved(RemInst));
1094}
1095/// verifyRemoved - Verify that the specified instruction does not occur
1096/// in our internal data structures.
1097void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
1098  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
1099       E = LocalDeps.end(); I != E; ++I) {
1100    assert(I->first != D && "Inst occurs in data structures");
1101    assert(I->second.getInst() != D &&
1102           "Inst occurs in data structures");
1103  }
1104
1105  for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
1106       E = NonLocalPointerDeps.end(); I != E; ++I) {
1107    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
1108    const NonLocalDepInfo &Val = I->second.second;
1109    for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
1110         II != E; ++II)
1111      assert(II->second.getInst() != D && "Inst occurs as NLPD value");
1112  }
1113
1114  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
1115       E = NonLocalDeps.end(); I != E; ++I) {
1116    assert(I->first != D && "Inst occurs in data structures");
1117    const PerInstNLInfo &INLD = I->second;
1118    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
1119         EE = INLD.first.end(); II  != EE; ++II)
1120      assert(II->second.getInst() != D && "Inst occurs in data structures");
1121  }
1122
1123  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
1124       E = ReverseLocalDeps.end(); I != E; ++I) {
1125    assert(I->first != D && "Inst occurs in data structures");
1126    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
1127         EE = I->second.end(); II != EE; ++II)
1128      assert(*II != D && "Inst occurs in data structures");
1129  }
1130
1131  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
1132       E = ReverseNonLocalDeps.end();
1133       I != E; ++I) {
1134    assert(I->first != D && "Inst occurs in data structures");
1135    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
1136         EE = I->second.end(); II != EE; ++II)
1137      assert(*II != D && "Inst occurs in data structures");
1138  }
1139
1140  for (ReverseNonLocalPtrDepTy::const_iterator
1141       I = ReverseNonLocalPtrDeps.begin(),
1142       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
1143    assert(I->first != D && "Inst occurs in rev NLPD map");
1144
1145    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
1146         E = I->second.end(); II != E; ++II)
1147      assert(*II != ValueIsLoadPair(D, false) &&
1148             *II != ValueIsLoadPair(D, true) &&
1149             "Inst occurs in ReverseNonLocalPtrDeps map");
1150  }
1151
1152}
1153