MemCpyOptimizer.cpp revision 824b958e6fb1236e92e4d07f3acf18fca107cdc0
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/Streams.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType() == Type::Int8Ty) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::FloatTy)
      V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
    if (CFP->getType() == Type::DoubleTy)
      V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
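      // For example, i32 0xA0A0A0A0 splits into halves 0xA0A0 and 0xA0A0,
      // which in turn split into 0xA0 and 0xA0, so it is the byte 0xA0
      // repeated, while i16 0x1234 fails at the first split (0x12 != 0x34).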
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2  = Val.lshr(NextWidth);
        Val2.trunc(NextWidth);
        Val.trunc(NextWidth);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

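/// GetOffsetFromIndex - Compute the constant byte offset implied by GEP
/// operands [Idx, NumOperands).  If a variable (non-constant) index is
/// encountered, VariableIdxFound is set and the returned offset is
/// meaningless.  As an illustrative example, given
/// "getelementptr [4 x i32]* %P, i32 0, i32 2" and Idx == 1 on a target with
/// 4-byte i32, the result is 2*4 == 8 bytes.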
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  With 4-byte elements the offset would
/// be -8, since Ptr2 == Ptr1 + (-8).
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may share some number of common
  // (and potentially variable) indices.  Beyond those, each has a constant
  // offset, and the difference between the two determines their offset from
  // each other.  We handle no other case at this point.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [3, 4).  The fourth store fills the
/// gap, joining the two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range [Start, End) that describes the span this
  // range covers.  The range is closed at the start and open at the end.
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or a span of 64 or more bytes, use
  // memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
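  //
  // For example (illustrative numbers), with a 4-byte pointer size, 7 one-byte
  // stores cover 7 bytes: one pointer-sized store plus 3 byte stores, and
  // 7 > 1+3, so we use memset.  But 3 four-byte stores cover 12 bytes, which
  // is exactly 3 pointer-sized stores, and 3 > 3 fails, so we leave them
  // alone.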
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};
} // end anon namespace

/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause I to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

291
292//===----------------------------------------------------------------------===//
293//                         MemCpyOpt Pass
294//===----------------------------------------------------------------------===//
295
296namespace {
297
298  class VISIBILITY_HIDDEN MemCpyOpt : public FunctionPass {
299    bool runOnFunction(Function &F);
300  public:
301    static char ID; // Pass identification, replacement for typeid
302    MemCpyOpt() : FunctionPass(&ID) {}
303
304  private:
305    // This transformation requires dominator postdominator info
306    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
307      AU.setPreservesCFG();
308      AU.addRequired<DominatorTree>();
309      AU.addRequired<MemoryDependenceAnalysis>();
310      AU.addRequired<AliasAnalysis>();
311      AU.addRequired<TargetData>();
312      AU.addPreserved<AliasAnalysis>();
313      AU.addPreserved<MemoryDependenceAnalysis>();
314      AU.addPreserved<TargetData>();
315    }
316
317    // Helper fuctions
318    bool processStore(StoreInst *SI, BasicBlock::iterator& BBI);
319    bool processMemCpy(MemCpyInst* M);
320    bool performCallSlotOptzn(MemCpyInst* cpy, CallInst* C);
321    bool iterateOnFunction(Function &F);
322  };
323
324  char MemCpyOpt::ID = 0;
325}
326
327// createMemCpyOptPass - The public interface to this file...
328FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
329
330static RegisterPass<MemCpyOpt> X("memcpyopt",
331                                 "MemCpy Optimization");
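// Because the pass is registered under the name "memcpyopt", it can be run in
// isolation with, for example, "opt -memcpyopt input.bc -o output.bc".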


/// processStore - When scanning forward over instructions, we look for some
/// other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
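///
/// For example (illustrative), four adjacent byte stores:
///   A[0] = 0; A[1] = 0; A[2] = 0; A[3] = 0;
/// are merged into a single call to the memset intrinsic covering all four
/// bytes.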
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
  if (SI->isVolatile()) return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;

  TargetData &TD = getAnalysis<TargetData>();
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Okay, so we now have a single store of a splatable value.  Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);

  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Tys[] = {Type::Int64Ty};
      MemSetF = Intrinsic::getDeclaration(SI->getParent()->getParent()
                                          ->getParent(), Intrinsic::memset,
                                          Tys, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,   // Start, value
      ConstantInt::get(Type::Int64Ty, Range.End-Range.Start),  // size
      ConstantInt::get(Type::Int32Ty, Range.Alignment)   // align
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(cerr << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            cerr << *Range.TheStores[i];
          cerr << "With: " << *C);
    C=C;  // Suppress an unused-variable warning in release (NDEBUG) builds.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on, and
/// checks for the possibility of a call slot optimization by having the call
/// write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
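  //
  // A typical case (illustrative) is a struct return that is immediately
  // copied out:
  //
  //   %tmp = alloca %struct.S
  //   call void @f(%struct.S* sret %tmp)
  //   memcpy(%dest, %tmp, sizeof(%struct.S))
  //
  // which this optimization turns into a direct
  // "call void @f(%struct.S* sret %dest)", dropping the memcpy entirely.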

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value* cpyDest = cpy->getDest();
  Value* cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData& TD = getAnalysis<TargetData>();

  ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD.getABITypeSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User* UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst* G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree& DT = getAnalysis<DominatorTree>();
  if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() != cpyDest->getType())
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                       CS.getArgument(i)->getType(), cpyDest->getName(), C));
      else
        CS.setArgument(i, cpyDest);
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.dropInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  NumMemCpyInstr++;

  return true;
}

/// processMemCpy - Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
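///
/// For example (illustrative):
///   memcpy(Y, X, 64)
///   memcpy(Z, Y, 64)
/// is rewritten so the second copy reads directly from X:
///   memcpy(Y, X, 64)
///   memcpy(Z, X, 64)
/// leaving the first memcpy dead if Y has no other uses.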
bool MemCpyOpt::processMemCpy(MemCpyInst* M) {
  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE
  //   b) call-memcpy xform for return slot optimization
  Instruction* dep = MD.getDependency(M);
  if (dep == MemoryDependenceAnalysis::None ||
      dep == MemoryDependenceAnalysis::NonLocal)
    return false;
  else if (!isa<MemCpyInst>(dep)) {
    if (CallInst* C = dyn_cast<CallInst>(dep))
      return performCallSlotOptzn(M, C);
    else
      return false;
  }

  MemCpyInst* MDep = cast<MemCpyInst>(dep);

  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpy's.
  const Type *Tys[1];
  Tys[0] = M->getLength()->getType();
  Function* MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), Tys, 1);

  std::vector<Value*> args;
  args.push_back(M->getRawDest());
  args.push_back(MDep->getRawSource());
  args.push_back(M->getLength());
  args.push_back(M->getAlignment());

  CallInst* C = CallInst::Create(MemCpyFun, args.begin(), args.end(), "", M);

  // If C and M don't interfere, then this is a valid transformation.  If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == MDep) {
    MD.dropInstruction(M);
    M->eraseFromParent();

    NumMemCpyInstr++;

    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();

  return false;
}


// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function& F) {
  bool changed = false;
  bool shouldContinue = true;

  while (shouldContinue) {
    shouldContinue = iterateOnFunction(F);
    changed |= shouldContinue;
  }

  return changed;
}


// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool changed_function = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction* I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        changed_function |= processStore(SI, BI);
      else if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
        changed_function |= processMemCpy(M);
      }
    }
  }

  return changed_function;
}
749