//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
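// For example, in the following IR sketch (illustrative, not from any
// particular test case), the first store is dead because it is overwritten
// before the stored value can be observed:
//
//   store i32 1, i32* %p
//   store i32 2, i32* %p
//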
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = AA->getTargetLibraryInfo();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = nullptr; MD = nullptr; DT = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallSetVector<Value *, 16> &DeadStackObjects,
                               const DataLayout &DL);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                               MemoryDependenceAnalysis &MD,
                               const TargetLibraryInfo *TLI,
                               SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  // Pre-decrement to compensate for the ++NumFastOther in the loop below: the
  // caller counts 'I' itself (e.g. via NumFastStores), so only the extra
  // instructions deleted along with it should land in NumFastOther.
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}


/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
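/// For example, both of these write memory and are analyzable below (an
/// illustrative IR sketch):
///   store i32 0, i32* %p
///   call void @llvm.memset.p0i8.i64(i8* %q, i8 0, i64 16, i32 1, i1 false)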
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI && TLI->has(LibFunc::strcpy) &&
          F->getName() == TLI->getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncpy) &&
          F->getName() == TLI->getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strcat) &&
          F->getName() == TLI->getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncat) &&
          F->getName() == TLI->getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
         "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}


/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
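///
/// For example (illustrative), an unordered store is removable while a
/// volatile store is not:
///   store i32 0, i32* %p            ; removable if proven dead
///   store volatile i32 0, i32* %p   ; never removable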
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}


/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo *TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, TLI))
    return Size;
  return AliasAnalysis::UnknownSize;
}

namespace {
  enum OverwriteResult {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location,
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
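///
/// For example (an illustrative sketch; assume %q is a bitcast of %p, so both
/// share the same start address):
///   store i16 0, i16* %p   ; Earlier, bytes [0,2)
///   store i64 0, i64* %q   ; Later, bytes [0,8) -> OverwriteComplete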
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo *TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    which still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store:
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A
/// dead.  The usual situation isn't an explicit A<-A store like this (which
/// can be trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}


//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, TLI))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
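    //
    // For example (an illustrative IR sketch):
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p   ; no-op store, removable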
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save
          // BBI in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out.  However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) not loaded from
      // by 'Inst', then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        const DataLayout &DL = BB.getModule()->getDataLayout();
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, AA->getTargetLibraryInfo(),
                        DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: Base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway, then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it to a non-vector size is likely to
          // be slower.
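          //
          // For example (an illustrative sketch, not from any test case):
          //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i32 1, i1 false)
          //   store i64 1, i64* %q   ; %q = 24 bytes into %p's region
          // Here the memset's length can be trimmed from 32 to 24, since its
          // last 8 bytes are overwritten by the later store.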
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
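///
/// For example (an illustrative sketch; %obj and %field are hypothetical
/// names, with %field pointing into %obj's allocation):
///   store i32 0, i32* %field   ; dead: the whole object is freed below
///   call void @free(i8* %obj)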
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);
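  // For example (an illustrative sketch): given 'void @f(i32* byval %p)', a
  // store to %p with no subsequent read is dead at every 'ret'.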

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (auto CS = CallSite(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        AliasAnalysis::ModRefResult A = AA->getModRefInfo(
            CS, I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));

        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias
/// any of the stack objects in the DeadStackObjects set.  If so, they become
/// live because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(
        I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}