// DeadStoreElimination.cpp -- revision a04096580a65b6cfbe12dabf6d695f7303750c0d
1//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements a trivial dead store elimination that only considers
11// basic-block local redundant stores.
12//
13// FIXME: This should eventually be extended to be a post-dominator tree
14// traversal.  Doing so would be pretty trivial.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "dse"
19#include "llvm/Transforms/Scalar.h"
20#include "llvm/Constants.h"
21#include "llvm/Function.h"
22#include "llvm/Instructions.h"
23#include "llvm/IntrinsicInst.h"
24#include "llvm/Pass.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/Dominators.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/MemoryDependenceAnalysis.h"
31#include "llvm/Analysis/ValueTracking.h"
32#include "llvm/Target/TargetData.h"
33#include "llvm/Transforms/Utils/Local.h"
34using namespace llvm;
35
// Pass-wide statistics, reported with -stats.
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
38
namespace {
  /// DSE - Basic-block-local dead store elimination driven by
  /// MemoryDependenceAnalysis: stores whose memory is provably overwritten
  /// before being read (or is dead at function exit) are deleted.
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;             // Cached for the duration of runOnFunction.
    MemoryDependenceAnalysis *MD;  // Cached for the duration of runOnFunction.

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DominatorTree &DT = getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT.isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      // Drop the cached analysis pointers; they are only valid per-run.
      AA = 0; MD = 0;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallPtrSet<Value*, 16> &DeadStackObjects);


    // getAnalysisUsage - Declare required analyses (dominators, alias
    // analysis, memory dependence) and what is preserved.  The CFG is
    // never modified by this pass.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}
85
// Pass registration boilerplate: define the pass identifier and register
// 'dse' with its analysis dependencies so it can be requested by name.
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

// Public factory used by clients building pass pipelines.
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
94
95//===----------------------------------------------------------------------===//
96// Helper functions
97//===----------------------------------------------------------------------===//
98
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  // Pre-decrement so that the ++NumFastOther in the loop below nets to zero
  // for the root instruction I; only the transitively deleted feeders are
  // counted as "other instrs removed".
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      // Drop the use edge so the operand's use list reflects reality before
      // we test use_empty() below.
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->erase(DeadInst);
  } while (!NowDeadInsts.empty());
}
140
141
142/// hasMemoryWrite - Does this instruction write some memory?  This only returns
143/// true for things that we can analyze with other helpers below.
144static bool hasMemoryWrite(Instruction *I) {
145  if (isa<StoreInst>(I))
146    return true;
147  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
148    switch (II->getIntrinsicID()) {
149    default:
150      return false;
151    case Intrinsic::memset:
152    case Intrinsic::memmove:
153    case Intrinsic::memcpy:
154    case Intrinsic::init_trampoline:
155    case Intrinsic::lifetime_end:
156      return true;
157    }
158  }
159  return false;
160}
161
162/// getLocForWrite - Return a Location stored to by the specified instruction.
163static AliasAnalysis::Location
164getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
165  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
166    return AA.getLocation(SI);
167
168  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
169    // memcpy/memmove/memset.
170    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
171    // If we don't have target data around, an unknown size in Location means
172    // that we should use the size of the pointee type.  This isn't valid for
173    // memset/memcpy, which writes more than an i8.
174    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
175      return AliasAnalysis::Location();
176    return Loc;
177  }
178
179  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
180  if (II == 0) return AliasAnalysis::Location();
181
182  switch (II->getIntrinsicID()) {
183  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
184  case Intrinsic::init_trampoline:
185    // If we don't have target data around, an unknown size in Location means
186    // that we should use the size of the pointee type.  This isn't valid for
187    // init.trampoline, which writes more than an i8.
188    if (AA.getTargetData() == 0) return AliasAnalysis::Location();
189
190    // FIXME: We don't know the size of the trampoline, so we can't really
191    // handle it here.
192    return AliasAnalysis::Location(II->getArgOperand(0));
193  case Intrinsic::lifetime_end: {
194    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
195    return AliasAnalysis::Location(II->getArgOperand(1), Len);
196  }
197  }
198}
199
200/// isRemovable - If the value of this instruction and the memory it writes to
201/// is unused, may we delete this instruction?
202static bool isRemovable(Instruction *I) {
203  // Don't remove volatile stores.
204  if (StoreInst *SI = dyn_cast<StoreInst>(I))
205    return !SI->isVolatile();
206
207  IntrinsicInst *II = cast<IntrinsicInst>(I);
208  switch (II->getIntrinsicID()) {
209  default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
210  case Intrinsic::lifetime_end:
211    // Never remove dead lifetime_end's, e.g. because it is followed by a
212    // free.
213    return false;
214  case Intrinsic::init_trampoline:
215    // Always safe to remove init_trampoline.
216    return true;
217
218  case Intrinsic::memset:
219  case Intrinsic::memmove:
220  case Intrinsic::memcpy:
221    // Don't remove volatile memory intrinsics.
222    return !cast<MemIntrinsic>(II)->isVolatile();
223  }
224}
225
226/// getStoredPointerOperand - Return the pointer that is being written to.
227static Value *getStoredPointerOperand(Instruction *I) {
228  if (StoreInst *SI = dyn_cast<StoreInst>(I))
229    return SI->getPointerOperand();
230  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
231    return MI->getDest();
232
233  IntrinsicInst *II = cast<IntrinsicInst>(I);
234  switch (II->getIntrinsicID()) {
235  default: assert(false && "Unexpected intrinsic!");
236  case Intrinsic::init_trampoline:
237    return II->getArgOperand(0);
238  }
239}
240
241static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
242  const TargetData *TD = AA.getTargetData();
243  if (TD == 0)
244    return AliasAnalysis::UnknownSize;
245
246  if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
247    // Get size information for the alloca
248    if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
249      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
250    return AliasAnalysis::UnknownSize;
251  }
252
253  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
254  const PointerType *PT = cast<PointerType>(V->getType());
255  return TD->getTypeAllocSize(PT->getElementType());
256}
257
258
/// isCompleteOverwrite - Return true if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
                                const AliasAnalysis::Location &Earlier,
                                AliasAnalysis &AA) {
  // Strip no-op pointer casts so syntactically different pointers to the
  // same address compare equal below.
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the store
      // is inferrable from the pointee type.  If they are the same type, then
      // we know that the store is safe.
      if (AA.getTargetData() == 0)
        return Later.Ptr->getType() == Earlier.Ptr->getType();
      return false;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size < Earlier.Size)
      return false;
    return true;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      Later.Size <= Earlier.Size ||
      AA.getTargetData() == 0)
    return false;

  const TargetData &TD = *AA.getTargetData();

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  int64_t Off1 = 0, Off2 = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, Off1, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, Off2, TD);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return false;

  // Otherwise, we might have a situation like:
  //  store i16 -> P + 1 Byte
  //  store i32 -> P
  // In this case, we see if the later store completely overlaps all bytes
  // stored by the previous store.
  //
  // NOTE(review): Off1/Off2 are int64_t while the Sizes are uint64_t, so the
  // additions below are evaluated in unsigned arithmetic.  If an offset were
  // negative enough that the true sum Off+Size is negative, the comparison
  // would be against a wrapped (huge) value — confirm whether offsets from
  // GetPointerBaseWithConstantOffset can make that happen here.
  if (Off1 < Off2 ||                       // Earlier starts before Later.
      Off1+Earlier.Size > Off2+Later.Size) // Earlier goes beyond Later.
    return false;
  // Otherwise, we have complete overlap.
  return true;
}
320
321
322//===----------------------------------------------------------------------===//
323// DSE Pass
324//===----------------------------------------------------------------------===//
325
/// runOnBasicBlock - Scan BB top-down, deleting stores that are provably dead
/// (overwritten before any read, storing back a just-loaded value, or feeding
/// a free).  Returns true if the block was modified.
bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Advance BBI before processing: Inst may be deleted below.
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore non-local store liveness.
    // FIXME: cross-block DSE would be fun. :)
    if (InstDep.isNonLocal() ||
        // Ignore self dependence, which happens in the entry block of the
        // function.
        InstDep.getInst() == Inst)
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD);

          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;

    // Walk backwards through this block's earlier writes to 'Loc', deleting
    // any that Inst completely overwrites.
    while (!InstDep.isNonLocal()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (DepLoc.Ptr == 0)
        break;

      // If we find a removable write that is completely obliterated by the
      // store to 'Loc' then we can remove it.
      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA)) {
        // Delete the store and now-dead instructions that feed it.
        DeleteDeadInstruction(DepWrite, *MD);
        ++NumFastStores;
        MadeChange = true;

        // DeleteDeadInstruction can delete the current instruction in loop
        // cases, reset BBI.
        BBI = Inst;
        if (BBI != BB.begin())
          --BBI;
        break;
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
438
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.  Returns true if at least one such store was
/// deleted; returns false if the very first dependency is already non-local
/// or not a removable store to the freed object.
bool DSE::HandleFree(CallInst *F) {
  MemDepResult Dep = MD->getDependency(F);
  do {
    // Non-local on the first iteration means nothing in this block writes
    // the freed memory; there is nothing to remove.
    if (Dep.isNonLocal()) return false;

    Instruction *Dependency = Dep.getInst();
    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
      return false;

    // Look through casts/GEPs to the object actually being stored to.
    Value *DepPointer =
      getStoredPointerOperand(Dependency)->getUnderlyingObject();

    // Check for aliasing: the store must be to the exact object being freed
    // (MustAlias), otherwise it may be observable.
    if (AA->alias(F->getArgOperand(0), 1, DepPointer, 1) !=
          AliasAnalysis::MustAlias)
      return false;

    // DCE instructions only used to calculate that store
    DeleteDeadInstruction(Dependency, *MD);
    ++NumFastStores;

    // Inst's old Dependency is now deleted. Compute the next dependency,
    // which may also be dead, as in
    //    s[0] = 0;
    //    s[1] = 0; // This has just been deleted.
    //    free(s);
    Dep = MD->getDependency(F);
  } while (!Dep.isNonLocal());

  return true;
}
472
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      Value *Pointer = getStoredPointerOperand(BBI)->getUnderlyingObject();

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        // DCE instructions only used to calculate that store.
        // Advance BBI first: deleting 'Dead' would invalidate the iterator.
        Instruction *Dead = BBI++;
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      // We've walked back to the alloca itself; above this point the object
      // does not exist, so stop treating it as a dead stack object.
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      unsigned NumModRef = 0, NumOther = 0;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // If we detect that our AA is imprecise, it's not worth it to scan the
        // rest of the DeadPointers set.  Just assume that the AA will return
        // ModRef for everything, and go ahead and bail out.
        if (NumModRef >= 16 && NumOther == 0)
          return MadeChange;

        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef)
          ++NumModRef;
        else
          ++NumOther;

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      // Erase outside the loop above: erasing while iterating the
      // SmallPtrSet would invalidate its iterators.
      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      // memcpy/memmove also *read* from their source operand.
      LoadedLoc = AA->getLocationForSource(MTI);
    } else {
      // Not a loading instruction.
      continue;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
603
604/// RemoveAccessedObjects - Check to see if the specified location may alias any
605/// of the stack objects in the DeadStackObjects set.  If so, they become live
606/// because the location is being loaded.
607void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
608                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
609  const Value *UnderlyingPointer = LoadedLoc.Ptr->getUnderlyingObject();
610
611  // A constant can't be in the dead pointer set.
612  if (isa<Constant>(UnderlyingPointer))
613    return;
614
615  // If the kill pointer can be easily reduced to an alloca, don't bother doing
616  // extraneous AA queries.
617  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
618    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
619    return;
620  }
621
622  SmallVector<Value*, 16> NowLive;
623  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
624       E = DeadStackObjects.end(); I != E; ++I) {
625    // See if the loaded location could alias the stack location.
626    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
627    if (!AA->isNoAlias(StackLoc, LoadedLoc))
628      NowLive.push_back(*I);
629  }
630
631  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
632       I != E; ++I)
633    DeadStackObjects.erase(*I);
634}
635
636