MemCpyOptimizer.cpp revision 05cd03b33559732f8ed55e5ff7554fd06d59eb6a
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V, LLVMContext &Context) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType() == Type::getInt8Ty(Context)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getFloatTy(Context))
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType() == Type::getDoubleTy(Context))
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
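      //
      // A worked example of the decomposition (illustrative, not from the
      // original source):
      //   i32 0xA0A0A0A0 -> 0xA0A0 / 0xA0A0 (halves equal) -> 0xA0 / 0xA0,
      //                     so the splat byte is 0xA0.
      //   i16 0x1234     -> 0x12 / 0x34 (halves differ), so it is rejected.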
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2  = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

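/// GetOffsetFromIndex - Walk the indices of GEP starting at operand Idx and
/// accumulate the constant byte offset they imply.  If a non-constant index
/// is encountered, VariableIdxFound is set and the return value is
/// meaningless.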
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case the offset would be -8
/// (assuming 4-byte array elements).
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [3, 4).  The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or if the range covers 64 or more
  // bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
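  //
  // For instance (illustrative numbers, assuming a 4-byte pointer size): four
  // i8 stores covering 4 bytes give NumPointerStores=1, NumByteStores=0, and
  // 4 > 1, so we use memset; three i16 stores covering 6 bytes give
  // NumPointerStores=1, NumByteStores=2, and 3 > 3 fails, so we keep the
  // stores.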
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace


/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that either I == E, in which case we didn't find anything to
  // merge with, or that Start <= I->End.  If End < I->Start or I == E, then we
  // need to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, we're done.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store overlaps
  // the range but is not entirely contained within it.

  // See if this store extends the start of the range.  If so, it couldn't
  // possibly cause the range to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the start point
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(&ID) {}

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

static RegisterPass<MemCpyOpt> X("memcpyopt",
                                 "MemCpy Optimization");
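
// Note: the pass can be exercised on its own with something like
//   opt -memcpyopt -S input.ll
// (an illustrative invocation; exact spelling may vary across LLVM versions).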


/// processStore - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext());
  if (!ByteVal)
    return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store of a splatable value.  Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction, it is fine; ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0),
                                   NextStore->getContext()))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  As a small compile-time optimization, we avoid doing this
  // until we know there is something interesting to merge with.
  Ranges.addStore(0, SI);

  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Ty = Type::getInt64Ty(SI->getContext());
      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr =
          PointerType::getUnqual(Type::getInt8Ty(SI->getContext()));
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,   // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(SI->getContext()),
                       Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(SI->getContext()), Range.Alignment)
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(errs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            errs() << *Range.TheStores[i];
          errs() << "With: " << *C);
    (void)C;  // C is only used inside DEBUG; silence unused-variable warnings.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SII = Range.TheStores.begin(),
           SIE = Range.TheStores.end(); SII != SIE; ++SII)
      (*SII)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on, and
/// checks for the possibility of a call slot optimization by having the call
/// write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
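  //
  // A typical source-level pattern this catches (illustrative, not from the
  // original source) is returning a struct through a temporary:
  //   struct S tmp = f();   // f writes its sret result into tmp
  //   *p = tmp;             // tmp is then memcpy'd into *p
  // where, after the transform, f writes directly into *p and tmp is dead.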

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value *cpyDest = cpy->getDest();
  Value *cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpy - Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
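///
/// For example (illustrative IR, simplified from the actual intrinsic
/// signatures):
///   call void @llvm.memcpy(i8* %Y, i8* %X, i64 32, i32 4)   ; A: X -> Y
///   call void @llvm.memcpy(i8* %Z, i8* %Y, i64 32, i32 4)   ; B: Y -> Z
/// becomes
///   call void @llvm.memcpy(i8* %Z, i8* %X, i64 32, i32 4)   ; B': X -> Z
/// leaving A dead if %Y has no other uses.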
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform, which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());

  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the lengths of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpy's.
  const Type *Ty = M->getLength()->getType();
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), &Ty, 1);

  Value *Args[4] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
  };

  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);

  // If C and M don't interfere, then this is a valid transformation.  If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    ++NumMemCpyInstr;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = ~0ULL;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(errs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *Ty = M->getLength()->getType();
  M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}


// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        if (processMemMove(M)) {
          --BI;         // Reprocess the new memcpy.
          MadeChange = true;
        }
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  return MadeChange;
}