//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

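/// Compute the byte offset implied by GEP operands Idx and onward.  For
/// example (illustrative, assuming a typical layout where i32 is 4 bytes),
/// "gep {i32, i32}* %p, i32 0, i32 1" with Idx == 1 yields 4.  If a
/// non-constant index is encountered, VariableIdxFound is set and the
/// returned offset is meaningless.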
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that, they may differ by a constant
  // offset, which determines their offset from each other.  We handle no other
  // case at this point.
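  // For example (illustrative), "gep %base, 0, %i, 1" and "gep %base, 0, %i, 3"
  // share the variable index %i, and the trailing constant indices 1 and 3 make
  // their mutual offset a compile-time constant.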
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range [Start, End) that describes the span this
  // range covers.  The range is closed at the start and open at the end.
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

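/// Returns true when merging this range's stores into one memset is likely a
/// win.  As a rough sketch of the heuristic below: on a target whose largest
/// legal integer is i32, three stores (an i32 and two i16s) covering 8
/// contiguous bytes would take two i32 stores to re-emit, and 3 > 2, so the
/// memset is considered profitable.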
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
    [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // Now either I == E, in which case we didn't find anything to merge with, or
  // Start <= I->End.  If I == E or End < I->Start, then nothing overlaps and
  // we need to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
    bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
    bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
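///
/// For example (illustrative IR, with %p1..%p3 being GEPs to the bytes
/// following %p):
/// \code
///   store i8 0, i8* %p
///   store i8 0, i8* %p1
///   store i8 0, i8* %p2
///   store i8 0, i8* %p3
/// \endcode
/// becomes a single
/// \code
///   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i32 1, i1 false)
/// \endcode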
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value at constant offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instructions needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach this pass to propagate the !nontemporal metadata
  // to the generated memset calls. However, that change would force the
  // backend to conservatively expand !nontemporal memset calls back to
  // sequences of store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
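  //
  // A sketch of the shape being matched (illustrative IR):
  //   %t = alloca %T
  //   call void @produce(%T* %t)   ; the call writes %t
  //   %v = load %T, %T* %t
  //   store %T %v, %T* %dst
  // If the checks below succeed, the call can write into %dst directly.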
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is at least as large as srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ?  cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
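///
/// Using the notation from the comments below (illustrative):
/// \code
///   memcpy(b <- a)
///   memcpy(c <- b)
/// \endcode
/// becomes
/// \code
///   memcpy(b <- a)
///   memcpy(c <- a)
/// \endcode
/// which leaves the first memcpy exposed to later dead-store elimination.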
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be at least as large as the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                              MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum Dest + SrcSize.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                           MemSetInst *MemSet) {
  // This only makes sense on memcpy(..., memset(...), ...).
  if (MemSet->getRawDest() != MemCpy->getRawSource())
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
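///
/// For example (illustrative), when AA can prove %d and %s do not overlap:
/// \code
///   call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 64, i32 1, i1 false)
/// \endcode
/// is rewritten in place to call @llvm.memcpy.p0i8.p0i8.i64 instead.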
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
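///
/// If the byval argument is fed by a memcpy whose source is unchanged in
/// between, the callee can read from the memcpy's source instead, e.g.
/// (illustrative):
/// \code
///   memcpy(tmp <- src)
///   foo(byval tmp)
/// \endcode
/// becomes
/// \code
///   memcpy(tmp <- src)   ; now likely dead
///   foo(byval src)
/// \endcode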
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be at least as large as the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy's alignment, then we check to see if we
  // can force the source of the memcpy to have the alignment we need.  If we
  // fail, we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform foo(*a) into foo(*b) here.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}