InstCombineCalls.cpp revision 2b9375e44b0e99ce23d2cef8de78f66347eef88a
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
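// For example (illustrative): an i8 or i16 argument passed through the
// varargs area is promoted to i32, while i32 and wider types are unchanged.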

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indices are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If the alloca is already at least as aligned as requested, keep it;
    // otherwise round it up to the preferred alignment.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

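  // For example (illustrative): if the known-zero mask shows four trailing
  // zero bits in the pointer value, the pointer is at least 16-byte aligned.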
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the memcpy/memmove length is 1/2/4/8 bytes, replace it with a
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store.  A single load+store correctly handles overlapping memory in
  // the memmove case.
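  //
  // For example (illustrative IR): an 8-byte memcpy such as
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
  // can become a single pair of instructions:
  //   %v = load i64* %sc, align 8      ; %sc/%dc are bitcasts of %s/%d
  //   store i64 %v, i64* %dc, align 8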
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because that improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI; // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
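  //
  // For example (illustrative): memset(p, 0xAB, 4) becomes a store of the
  // i32 constant 0xABABABAB through a bitcast of p, since the i8 fill value
  // is replicated across every byte of the wider integer.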
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // An alignment of 0 means "default" (1) for memset, but not for store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the length of the memset to 0; it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
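    // For example (illustrative): a memmove whose source is a constant
    // global such as @str = constant [4 x i8] c"abc\00" cannot overlap the
    // destination, so the copy direction no longer matters.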
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable whose size we can
    // determine, just return that size.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get the alloca's size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc'd size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference? Negative index normalized to a large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }
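    // For example (illustrative): with @g = global [10 x i8] and a constant
    // GEP to element 3, objectsize folds to 10 - 3 = 7 remaining bytes.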

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
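    // For example (illustrative): for x:i32 truncated to i16, c = 32 - 16,
    // so bswap(trunc(bswap(x))) folds to trunc(lshr(x, 16)), which yields
    // the top two bytes of x in their original order.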

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
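    // For example (illustrative): if x = (y << 4) | 8, bit 3 is known one
    // and bits 0-2 are known zero, so cttz(x) folds to the constant 3.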
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
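        // (Both operands are at least 2^(BitWidth-1), so the unsigned sum is
        // at least 2^BitWidth and must wrap.)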
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx     -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
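    // For example (illustrative): an x86 loadu.ps whose pointer can be
    // proven 16-byte aligned is equivalent to an ordinary aligned vector
    // load of <4 x float>.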
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
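    // If a larger alignment can be proven for the pointer operand than the
    // intrinsic's alignment argument claims, strengthen the argument.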
    unsigned MemAlign = GetOrEnforceKnownAlignment(II->getArgOperand(0));
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough check run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      const PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      const Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.
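      //
      // For example (illustrative): if the underlying function is
      // f(nest, a, b) and the trampoline call was made as f'(a, b), the new
      // argument list becomes (chain, a, b), with the chain value taken
      // from init_trampoline's third operand.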

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}