InstCombineCalls.cpp revision 03fceff6f69a0261a767aab8e62de8aa9301b86c
1//===- InstCombineCalls.cpp -----------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the visitCall and visitInvoke functions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombine.h"
15#include "llvm/ADT/Statistic.h"
16#include "llvm/Analysis/MemoryBuiltins.h"
17#include "llvm/IR/DataLayout.h"
18#include "llvm/Support/CallSite.h"
19#include "llvm/Support/PatternMatch.h"
20#include "llvm/Transforms/Utils/BuildLibCalls.h"
21#include "llvm/Transforms/Utils/Local.h"
22using namespace llvm;
23using namespace PatternMatch;
24
25STATISTIC(NumSimplified, "Number of library calls simplified");
26
27/// getPromotedType - Return the specified type promoted as it would be to pass
28/// through a va_arg area.
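/// For example, i8 and i16 are promoted to i32, while i32 and wider integer
/// types (and all non-integer types) are returned unchanged.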
29static Type *getPromotedType(Type *Ty) {
30  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
31    if (ITy->getBitWidth() < 32)
32      return Type::getInt32Ty(Ty->getContext());
33  }
34  return Ty;
35}
36
37/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
38/// single scalar element, like {{{type}}} or [1 x type], return type.
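/// For example, {{{double}}} and [1 x double] both reduce to double, while a
/// two-element struct such as {double, double} is returned unchanged.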
39static Type *reduceToSingleValueType(Type *T) {
40  while (!T->isSingleValueType()) {
41    if (StructType *STy = dyn_cast<StructType>(T)) {
42      if (STy->getNumElements() == 1)
43        T = STy->getElementType(0);
44      else
45        break;
46    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
47      if (ATy->getNumElements() == 1)
48        T = ATy->getElementType();
49      else
50        break;
51    } else
52      break;
53  }
54
55  return T;
56}
57
58Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
59  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
60  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
61  unsigned MinAlign = std::min(DstAlign, SrcAlign);
62  unsigned CopyAlign = MI->getAlignment();
63
64  if (CopyAlign < MinAlign) {
65    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
66                                             MinAlign, false));
67    return MI;
68  }
69
70  // If the memcpy/memmove length is 1/2/4/8 bytes, replace it with a single
71  // load/store pair.
72  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
73  if (MemOpLength == 0) return 0;
74
75  // Source and destination pointer types are always "i8*" for the intrinsic.  See
76  // if the size is something we can handle with a single primitive load/store.
77  // A single load+store correctly handles overlapping memory in the memmove
78  // case.
79  uint64_t Size = MemOpLength->getLimitedValue();
80  assert(Size && "0-sized memory transferring should be removed already.");
81
82  if (Size > 8 || (Size&(Size-1)))
83    return 0;  // If not 1/2/4/8 bytes, exit.
84
85  // Use an integer load+store unless we can find something better.
86  unsigned SrcAddrSp =
87    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
88  unsigned DstAddrSp =
89    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
90
91  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
92  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
93  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
94
95  // Memcpy forces the use of i8* for the source and destination.  That means
96  // that if you're using memcpy to move one double around, you'll get a cast
97  // from double* to i8*.  We'd much rather use a double load+store than an
98  // i64 load+store here, because this improves the odds that the source or
99  // dest address will be promotable.  See if we can find a better type than the
100  // integer datatype.
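  // An illustrative example (names are hypothetical; assumes the i8* operands
  // really point at doubles):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
  // becomes, roughly,
  //   %v = load double* %s.d, align 8
  //   store double %v, double* %d.d, align 8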
101  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
102  MDNode *CopyMD = 0;
103  if (StrippedDest != MI->getArgOperand(0)) {
104    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
105                                    ->getElementType();
106    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
107      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
108      // down through these levels if so.
109      SrcETy = reduceToSingleValueType(SrcETy);
110
111      if (SrcETy->isSingleValueType()) {
112        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
113        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
114
115        // If the memcpy has metadata describing the members, see if we can
116        // get the TBAA tag describing our copy.
117        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
118          if (M->getNumOperands() == 3 &&
119              M->getOperand(0) &&
120              isa<ConstantInt>(M->getOperand(0)) &&
121              cast<ConstantInt>(M->getOperand(0))->isNullValue() &&
122              M->getOperand(1) &&
123              isa<ConstantInt>(M->getOperand(1)) &&
124              cast<ConstantInt>(M->getOperand(1))->getValue() == Size &&
125              M->getOperand(2) &&
126              isa<MDNode>(M->getOperand(2)))
127            CopyMD = cast<MDNode>(M->getOperand(2));
128        }
129      }
130    }
131  }
132
133  // If the memcpy/memmove provides better alignment info than we can
134  // infer, use it.
135  SrcAlign = std::max(SrcAlign, CopyAlign);
136  DstAlign = std::max(DstAlign, CopyAlign);
137
138  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
139  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
140  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
141  L->setAlignment(SrcAlign);
142  if (CopyMD)
143    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
144  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
145  S->setAlignment(DstAlign);
146  if (CopyMD)
147    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
148
149  // Set the size of the copy to 0; it will be deleted on the next iteration.
150  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
151  return MI;
152}
153
154Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
155  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
156  if (MI->getAlignment() < Alignment) {
157    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
158                                             Alignment, false));
159    return MI;
160  }
161
162  // Extract the length, alignment, and fill value if they are constant.
163  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
164  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
165  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
166    return 0;
167  uint64_t Len = LenC->getLimitedValue();
168  Alignment = MI->getAlignment();
169  assert(Len && "0-sized memory setting should be removed already.");
170
171  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
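  // For example (illustrative): memset(p, 0xAB, 4) becomes a store of the i32
  // constant 0xABABABAB, because the byte fill value is replicated below into
  // every byte of the wider integer.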
172  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
173    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
174
175    Value *Dest = MI->getDest();
176    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
177    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
178    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
179
180    // For memset, alignment 0 is equivalent to alignment 1, but not for a store.
181    if (Alignment == 0) Alignment = 1;
182
183    // Extract the fill value and store.
184    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
185    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
186                                        MI->isVolatile());
187    S->setAlignment(Alignment);
188
189    // Set the size of the memset to 0; it will be deleted on the next iteration.
190    MI->setLength(Constant::getNullValue(LenC->getType()));
191    return MI;
192  }
193
194  return 0;
195}
196
197/// visitCallInst - CallInst simplification.  This mostly only handles folding
198/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
199/// the heavy lifting.
200///
201Instruction *InstCombiner::visitCallInst(CallInst &CI) {
202  if (isFreeCall(&CI, TLI))
203    return visitFree(CI);
204
205  // If the caller function is nounwind, mark the call as nounwind, even if the
206  // callee isn't.
207  if (CI.getParent()->getParent()->doesNotThrow() &&
208      !CI.doesNotThrow()) {
209    CI.setDoesNotThrow();
210    return &CI;
211  }
212
213  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
214  if (!II) return visitCallSite(&CI);
215
216  // Intrinsics cannot occur in an invoke, so handle them here instead of in
217  // visitCallSite.
218  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
219    bool Changed = false;
220
221    // memmove/cpy/set of zero bytes is a noop.
222    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
223      if (NumBytes->isNullValue())
224        return EraseInstFromFunction(CI);
225
226      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
227        if (CI->getZExtValue() == 1) {
228          // Replace the instruction with just byte operations.  We would
229          // transform other cases to loads/stores, but we don't know if
230          // alignment is sufficient.
231        }
232    }
233
234    // No other transformations apply to volatile transfers.
235    if (MI->isVolatile())
236      return 0;
237
238    // If we have a memmove and the source operand is a constant global,
239    // then the source and dest pointers can't alias, so we can change this
240    // into a call to memcpy.
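    // For example (illustrative): memmove(%dst, @const_global, %n) cannot have
    // overlapping source and destination, since the destination would have to
    // be writable, so rewriting it as memcpy is safe.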
241    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
242      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
243        if (GVSrc->isConstant()) {
244          Module *M = CI.getParent()->getParent()->getParent();
245          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
246          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
247                           CI.getArgOperand(1)->getType(),
248                           CI.getArgOperand(2)->getType() };
249          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
250          Changed = true;
251        }
252    }
253
254    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
255      // memmove(x,x,size) -> noop.
256      if (MTI->getSource() == MTI->getDest())
257        return EraseInstFromFunction(CI);
258    }
259
260    // If we can determine a pointer alignment that is bigger than currently
261    // set, update the alignment.
262    if (isa<MemTransferInst>(MI)) {
263      if (Instruction *I = SimplifyMemTransfer(MI))
264        return I;
265    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
266      if (Instruction *I = SimplifyMemSet(MSI))
267        return I;
268    }
269
270    if (Changed) return II;
271  }
272
273  switch (II->getIntrinsicID()) {
274  default: break;
275  case Intrinsic::objectsize: {
276    uint64_t Size;
277    if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
278      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
279    return 0;
280  }
281  case Intrinsic::bswap: {
282    Value *IIOperand = II->getArgOperand(0);
283    Value *X = 0;
284
285    // bswap(bswap(x)) -> x
286    if (match(IIOperand, m_BSwap(m_Value(X))))
287        return ReplaceInstUsesWith(CI, X);
288
289    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
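    // For example (illustrative): with X of type i64 truncated to i32, C is
    // 64 - 32 = 32, so the result is trunc(lshr(X, 32)) to i32, i.e. the high
    // half of X.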
290    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
291      unsigned C = X->getType()->getPrimitiveSizeInBits() -
292        IIOperand->getType()->getPrimitiveSizeInBits();
293      Value *CV = ConstantInt::get(X->getType(), C);
294      Value *V = Builder->CreateLShr(X, CV);
295      return new TruncInst(V, IIOperand->getType());
296    }
297    break;
298  }
299
300  case Intrinsic::powi:
301    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
302      // powi(x, 0) -> 1.0
303      if (Power->isZero())
304        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
305      // powi(x, 1) -> x
306      if (Power->isOne())
307        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
308      // powi(x, -1) -> 1/x
309      if (Power->isAllOnesValue())
310        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
311                                          II->getArgOperand(0));
312    }
313    break;
314  case Intrinsic::cttz: {
315    // If all bits below the first known one are known zero,
316    // this value is constant.
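    // For example (illustrative): if bits 0-2 are known zero and bit 3 is
    // known one, cttz must be exactly 3.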
317    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
318    // FIXME: Try to simplify vectors of integers.
319    if (!IT) break;
320    uint32_t BitWidth = IT->getBitWidth();
321    APInt KnownZero(BitWidth, 0);
322    APInt KnownOne(BitWidth, 0);
323    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
324    unsigned TrailingZeros = KnownOne.countTrailingZeros();
325    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
326    if ((Mask & KnownZero) == Mask)
327      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
328                                 APInt(BitWidth, TrailingZeros)));
329
330    }
331    break;
332  case Intrinsic::ctlz: {
333    // If all bits above the first known one are known zero,
334    // this value is constant.
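    // For example (illustrative): for an i32 whose top 8 bits are known zero
    // and whose bit 23 is known one, ctlz must be exactly 8.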
335    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
336    // FIXME: Try to simplify vectors of integers.
337    if (!IT) break;
338    uint32_t BitWidth = IT->getBitWidth();
339    APInt KnownZero(BitWidth, 0);
340    APInt KnownOne(BitWidth, 0);
341    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
342    unsigned LeadingZeros = KnownOne.countLeadingZeros();
343    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
344    if ((Mask & KnownZero) == Mask)
345      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
346                                 APInt(BitWidth, LeadingZeros)));
347
348    }
349    break;
350  case Intrinsic::uadd_with_overflow: {
351    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
352    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
353    uint32_t BitWidth = IT->getBitWidth();
354    APInt LHSKnownZero(BitWidth, 0);
355    APInt LHSKnownOne(BitWidth, 0);
356    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
357    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
358    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
359
360    if (LHSKnownNegative || LHSKnownPositive) {
361      APInt RHSKnownZero(BitWidth, 0);
362      APInt RHSKnownOne(BitWidth, 0);
363      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
364      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
365      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
366      if (LHSKnownNegative && RHSKnownNegative) {
367        // The sign bit is set in both cases: this MUST overflow.
368        // Create a simple add instruction, and insert it into the struct.
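        // For example (illustrative): with i8 operands, both values are at
        // least 128 when the sign bit is set, so the true sum is at least 256
        // and cannot fit in 8 bits.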
369        Value *Add = Builder->CreateAdd(LHS, RHS);
370        Add->takeName(&CI);
371        Constant *V[] = {
372          UndefValue::get(LHS->getType()),
373          ConstantInt::getTrue(II->getContext())
374        };
375        StructType *ST = cast<StructType>(II->getType());
376        Constant *Struct = ConstantStruct::get(ST, V);
377        return InsertValueInst::Create(Struct, Add, 0);
378      }
379
380      if (LHSKnownPositive && RHSKnownPositive) {
381        // The sign bit is clear in both cases: this CANNOT overflow.
382        // Create a simple add instruction, and insert it into the struct.
383        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
384        Add->takeName(&CI);
385        Constant *V[] = {
386          UndefValue::get(LHS->getType()),
387          ConstantInt::getFalse(II->getContext())
388        };
389        StructType *ST = cast<StructType>(II->getType());
390        Constant *Struct = ConstantStruct::get(ST, V);
391        return InsertValueInst::Create(Struct, Add, 0);
392      }
393    }
394  }
395  // FALL THROUGH uadd into sadd
396  case Intrinsic::sadd_with_overflow:
397    // Canonicalize constants into the RHS.
398    if (isa<Constant>(II->getArgOperand(0)) &&
399        !isa<Constant>(II->getArgOperand(1))) {
400      Value *LHS = II->getArgOperand(0);
401      II->setArgOperand(0, II->getArgOperand(1));
402      II->setArgOperand(1, LHS);
403      return II;
404    }
405
406    // X + undef -> undef
407    if (isa<UndefValue>(II->getArgOperand(1)))
408      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
409
410    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
411      // X + 0 -> {X, false}
412      if (RHS->isZero()) {
413        Constant *V[] = {
414          UndefValue::get(II->getArgOperand(0)->getType()),
415          ConstantInt::getFalse(II->getContext())
416        };
417        Constant *Struct =
418          ConstantStruct::get(cast<StructType>(II->getType()), V);
419        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
420      }
421    }
422    break;
423  case Intrinsic::usub_with_overflow:
424  case Intrinsic::ssub_with_overflow:
425    // undef - X -> undef
426    // X - undef -> undef
427    if (isa<UndefValue>(II->getArgOperand(0)) ||
428        isa<UndefValue>(II->getArgOperand(1)))
429      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
430
431    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
432      // X - 0 -> {X, false}
433      if (RHS->isZero()) {
434        Constant *V[] = {
435          UndefValue::get(II->getArgOperand(0)->getType()),
436          ConstantInt::getFalse(II->getContext())
437        };
438        Constant *Struct =
439          ConstantStruct::get(cast<StructType>(II->getType()), V);
440        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
441      }
442    }
443    break;
444  case Intrinsic::umul_with_overflow: {
445    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
446    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
447
448    APInt LHSKnownZero(BitWidth, 0);
449    APInt LHSKnownOne(BitWidth, 0);
450    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
451    APInt RHSKnownZero(BitWidth, 0);
452    APInt RHSKnownOne(BitWidth, 0);
453    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
454
455    // Get the largest possible values for each operand.
456    APInt LHSMax = ~LHSKnownZero;
457    APInt RHSMax = ~RHSKnownZero;
458
459    // If multiplying the maximum values does not overflow then we can turn
460    // this into a plain NUW mul.
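    // For example (illustrative): if both i32 operands are known to fit in 16
    // bits, LHSMax * RHSMax fits in 32 bits, so the multiply can never wrap.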
461    bool Overflow;
462    LHSMax.umul_ov(RHSMax, Overflow);
463    if (!Overflow) {
464      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
465      Constant *V[] = {
466        UndefValue::get(LHS->getType()),
467        Builder->getFalse()
468      };
469      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
470      return InsertValueInst::Create(Struct, Mul, 0);
471    }
472  } // FALL THROUGH
473  case Intrinsic::smul_with_overflow:
474    // Canonicalize constants into the RHS.
475    if (isa<Constant>(II->getArgOperand(0)) &&
476        !isa<Constant>(II->getArgOperand(1))) {
477      Value *LHS = II->getArgOperand(0);
478      II->setArgOperand(0, II->getArgOperand(1));
479      II->setArgOperand(1, LHS);
480      return II;
481    }
482
483    // X * undef -> undef
484    if (isa<UndefValue>(II->getArgOperand(1)))
485      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
486
487    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
488      // X*0 -> {0, false}
489      if (RHSI->isZero())
490        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
491
492      // X * 1 -> {X, false}
493      if (RHSI->equalsInt(1)) {
494        Constant *V[] = {
495          UndefValue::get(II->getArgOperand(0)->getType()),
496          ConstantInt::getFalse(II->getContext())
497        };
498        Constant *Struct =
499          ConstantStruct::get(cast<StructType>(II->getType()), V);
500        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
501      }
502    }
503    break;
504  case Intrinsic::ppc_altivec_lvx:
505  case Intrinsic::ppc_altivec_lvxl:
506    // Turn PPC lvx -> load if the pointer is known aligned.
507    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
508      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
509                                         PointerType::getUnqual(II->getType()));
510      return new LoadInst(Ptr);
511    }
512    break;
513  case Intrinsic::ppc_altivec_stvx:
514  case Intrinsic::ppc_altivec_stvxl:
515    // Turn stvx -> store if the pointer is known aligned.
516    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
517      Type *OpPtrTy =
518        PointerType::getUnqual(II->getArgOperand(0)->getType());
519      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
520      return new StoreInst(II->getArgOperand(0), Ptr);
521    }
522    break;
523  case Intrinsic::x86_sse_storeu_ps:
524  case Intrinsic::x86_sse2_storeu_pd:
525  case Intrinsic::x86_sse2_storeu_dq:
526    // Turn X86 storeu -> store if the pointer is known aligned.
527    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
528      Type *OpPtrTy =
529        PointerType::getUnqual(II->getArgOperand(1)->getType());
530      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
531      return new StoreInst(II->getArgOperand(1), Ptr);
532    }
533    break;
534
535  case Intrinsic::x86_sse_cvtss2si:
536  case Intrinsic::x86_sse_cvtss2si64:
537  case Intrinsic::x86_sse_cvttss2si:
538  case Intrinsic::x86_sse_cvttss2si64:
539  case Intrinsic::x86_sse2_cvtsd2si:
540  case Intrinsic::x86_sse2_cvtsd2si64:
541  case Intrinsic::x86_sse2_cvttsd2si:
542  case Intrinsic::x86_sse2_cvttsd2si64: {
543    // These intrinsics only demand the 0th element of their input vectors. If
544    // we can simplify the input based on that, do so now.
545    unsigned VWidth =
546      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
547    APInt DemandedElts(VWidth, 1);
548    APInt UndefElts(VWidth, 0);
549    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
550                                              DemandedElts, UndefElts)) {
551      II->setArgOperand(0, V);
552      return II;
553    }
554    break;
555  }
556
557
558  case Intrinsic::x86_sse41_pmovsxbw:
559  case Intrinsic::x86_sse41_pmovsxwd:
560  case Intrinsic::x86_sse41_pmovsxdq:
561  case Intrinsic::x86_sse41_pmovzxbw:
562  case Intrinsic::x86_sse41_pmovzxwd:
563  case Intrinsic::x86_sse41_pmovzxdq: {
564    // pmov{s|z}x ignores the upper half of its input vector.
565    unsigned VWidth =
566      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
567    unsigned LowHalfElts = VWidth / 2;
568    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
569    APInt UndefElts(VWidth, 0);
570    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
571                                                 InputDemandedElts,
572                                                 UndefElts)) {
573      II->setArgOperand(0, TmpV);
574      return II;
575    }
576    break;
577  }
578
579  case Intrinsic::ppc_altivec_vperm:
580    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
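    // Each mask byte selects one of the 32 bytes of the concatenated inputs
    // (0-15 from the first operand, 16-31 from the second), which is what the
    // "Idx &= 31" handling below implements.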
581    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
582      assert(Mask->getType()->getVectorNumElements() == 16 &&
583             "Bad type for intrinsic!");
584
585      // Check that all of the elements are integer constants or undefs.
586      bool AllEltsOk = true;
587      for (unsigned i = 0; i != 16; ++i) {
588        Constant *Elt = Mask->getAggregateElement(i);
589        if (Elt == 0 ||
590            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
591          AllEltsOk = false;
592          break;
593        }
594      }
595
596      if (AllEltsOk) {
597        // Cast the input vectors to byte vectors.
598        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
599                                            Mask->getType());
600        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
601                                            Mask->getType());
602        Value *Result = UndefValue::get(Op0->getType());
603
604        // Only extract each element once.
605        Value *ExtractedElts[32];
606        memset(ExtractedElts, 0, sizeof(ExtractedElts));
607
608        for (unsigned i = 0; i != 16; ++i) {
609          if (isa<UndefValue>(Mask->getAggregateElement(i)))
610            continue;
611          unsigned Idx =
612            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
613          Idx &= 31;  // Match the hardware behavior.
614
615          if (ExtractedElts[Idx] == 0) {
616            ExtractedElts[Idx] =
617              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
618                                            Builder->getInt32(Idx&15));
619          }
620
621          // Insert this value into the result vector.
622          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
623                                                Builder->getInt32(i));
624        }
625        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
626      }
627    }
628    break;
629
630  case Intrinsic::arm_neon_vld1:
631  case Intrinsic::arm_neon_vld2:
632  case Intrinsic::arm_neon_vld3:
633  case Intrinsic::arm_neon_vld4:
634  case Intrinsic::arm_neon_vld2lane:
635  case Intrinsic::arm_neon_vld3lane:
636  case Intrinsic::arm_neon_vld4lane:
637  case Intrinsic::arm_neon_vst1:
638  case Intrinsic::arm_neon_vst2:
639  case Intrinsic::arm_neon_vst3:
640  case Intrinsic::arm_neon_vst4:
641  case Intrinsic::arm_neon_vst2lane:
642  case Intrinsic::arm_neon_vst3lane:
643  case Intrinsic::arm_neon_vst4lane: {
644    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
645    unsigned AlignArg = II->getNumArgOperands() - 1;
646    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
647    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
648      II->setArgOperand(AlignArg,
649                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
650                                         MemAlign, false));
651      return II;
652    }
653    break;
654  }
655
656  case Intrinsic::arm_neon_vmulls:
657  case Intrinsic::arm_neon_vmullu: {
658    Value *Arg0 = II->getArgOperand(0);
659    Value *Arg1 = II->getArgOperand(1);
660
661    // Handle mul by zero first:
662    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
663      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
664    }
665
666    // Check for constant LHS & RHS - in this case we just simplify.
667    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
668    VectorType *NewVT = cast<VectorType>(II->getType());
669    unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
670    if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
671      if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
672        VectorType* VT = cast<VectorType>(CV0->getType());
673        SmallVector<Constant*, 4> NewElems;
674        for (unsigned i = 0; i < VT->getNumElements(); ++i) {
675          APInt CV0E =
676            (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
677          CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
678          APInt CV1E =
679            (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
680          CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
681          NewElems.push_back(
682            ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
683        }
684        return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
685      }
686
687      // Couldn't simplify - canonicalize the constant to the RHS.
688      std::swap(Arg0, Arg1);
689    }
690
691    // Handle mul by one:
692    if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
693      if (ConstantInt *Splat =
694            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
695        if (Splat->isOne()) {
696          if (Zext)
697            return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
698          // else
699          return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
700        }
701      }
702    }
703
704    break;
705  }
706
707  case Intrinsic::stackrestore: {
708    // If the save is right next to the restore, remove the restore.  This can
709    // happen when variable allocas are DCE'd.
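    // For example (illustrative):
    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp)  ; nothing in between -> dead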
710    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
711      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
712        BasicBlock::iterator BI = SS;
713        if (&*++BI == II)
714          return EraseInstFromFunction(CI);
715      }
716    }
717
718    // Scan down this block to see if there is another stack restore in the
719    // same block without an intervening call/alloca.
720    BasicBlock::iterator BI = II;
721    TerminatorInst *TI = II->getParent()->getTerminator();
722    bool CannotRemove = false;
723    for (++BI; &*BI != TI; ++BI) {
724      if (isa<AllocaInst>(BI)) {
725        CannotRemove = true;
726        break;
727      }
728      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
729        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
730          // If there is a stackrestore below this one, remove this one.
731          if (II->getIntrinsicID() == Intrinsic::stackrestore)
732            return EraseInstFromFunction(CI);
733          // Otherwise, ignore the intrinsic.
734        } else {
735          // If we found a non-intrinsic call, we can't remove the stack
736          // restore.
737          CannotRemove = true;
738          break;
739        }
740      }
741    }
742
743    // If the stack restore is in a return or resume block and there are no
744    // allocas or calls between the restore and the terminator, nuke the
745    // restore.
746    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
747      return EraseInstFromFunction(CI);
748    break;
749  }
750  }
751
752  return visitCallSite(II);
753}
754
755// InvokeInst simplification
756//
757Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
758  return visitCallSite(&II);
759}
760
761/// isSafeToEliminateVarargsCast - If this cast does not affect the value
762/// passed through the varargs area, we can eliminate the use of the cast.
763static bool isSafeToEliminateVarargsCast(const CallSite CS,
764                                         const CastInst * const CI,
765                                         const DataLayout * const TD,
766                                         const int ix) {
767  if (!CI->isLosslessCast())
768    return false;
769
770  // The size of ByVal arguments is derived from the type, so we
771  // can't change to a type with a different size.  If the size were
772  // passed explicitly we could avoid this check.
773  if (!CS.isByValArgument(ix))
774    return true;
775
776  Type* SrcTy =
777            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
778  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
779  if (!SrcTy->isSized() || !DstTy->isSized())
780    return false;
781  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
782    return false;
783  return true;
784}
785
786// Try to fold some different types of calls here.
787// Currently we're only working with the checking functions, memcpy_chk,
788// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
789// strcat_chk and strncat_chk.
790Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {
791  if (CI->getCalledFunction() == 0) return 0;
792
793  if (Value *With = Simplifier->optimizeCall(CI)) {
794    ++NumSimplified;
795    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
796  }
797
798  return 0;
799}
800
801static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
802  // Strip off at most one level of pointer casts, looking for an alloca.  This
803  // is good enough in practice and simpler than handling any number of casts.
804  Value *Underlying = TrampMem->stripPointerCasts();
805  if (Underlying != TrampMem &&
806      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
807    return 0;
808  if (!isa<AllocaInst>(Underlying))
809    return 0;
810
811  IntrinsicInst *InitTrampoline = 0;
812  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
813       I != E; I++) {
814    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
815    if (!II)
816      return 0;
817    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
818      if (InitTrampoline)
819        // More than one init_trampoline writes to this value.  Give up.
820        return 0;
821      InitTrampoline = II;
822      continue;
823    }
824    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
825      // Allow any number of calls to adjust.trampoline.
826      continue;
827    return 0;
828  }
829
830  // No call to init.trampoline found.
831  if (!InitTrampoline)
832    return 0;
833
834  // Check that the alloca is being used in the expected way.
835  if (InitTrampoline->getOperand(0) != TrampMem)
836    return 0;
837
838  return InitTrampoline;
839}
840
841static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
842                                               Value *TrampMem) {
843  // Visit all the previous instructions in the basic block, and try to find an
844  // init.trampoline which has a direct path to the adjust.trampoline.
845  for (BasicBlock::iterator I = AdjustTramp,
846       E = AdjustTramp->getParent()->begin(); I != E; ) {
847    Instruction *Inst = --I;
848    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
849      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
850          II->getOperand(0) == TrampMem)
851        return II;
852    if (Inst->mayWriteToMemory())
853      return 0;
854  }
855  return 0;
856}
857
858// Given a call to llvm.adjust.trampoline, find and return the corresponding
859// call to llvm.init.trampoline if the call to the trampoline can be optimized
860// to a direct call to a function.  Otherwise return NULL.
861//
862static IntrinsicInst *FindInitTrampoline(Value *Callee) {
863  Callee = Callee->stripPointerCasts();
864  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
865  if (!AdjustTramp ||
866      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
867    return 0;
868
869  Value *TrampMem = AdjustTramp->getOperand(0);
870
871  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
872    return IT;
873  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
874    return IT;
875  return 0;
876}
877
878// visitCallSite - Improvements for call and invoke instructions.
879//
880Instruction *InstCombiner::visitCallSite(CallSite CS) {
881  if (isAllocLikeFn(CS.getInstruction(), TLI))
882    return visitAllocSite(*CS.getInstruction());
883
884  bool Changed = false;
885
886  // If the callee is a pointer to a function, attempt to move any casts to the
887  // arguments of the call/invoke.
888  Value *Callee = CS.getCalledValue();
889  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
890    return 0;
891
892  if (Function *CalleeF = dyn_cast<Function>(Callee))
893    // If the call and callee calling conventions don't match, this call must
894    // be unreachable, as the call is undefined.
895    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
896        // Only do this for calls to a function with a body.  A prototype may
897        // not actually end up matching the implementation's calling conv for a
898        // variety of reasons (e.g. it may be written in assembly).
899        !CalleeF->isDeclaration()) {
900      Instruction *OldCall = CS.getInstruction();
901      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
902                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
903                                  OldCall);
904      // If OldCall does not return void then replaceAllUsesWith undef.
905      // This allows ValueHandlers and custom metadata to adjust themselves.
906      if (!OldCall->getType()->isVoidTy())
907        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
908      if (isa<CallInst>(OldCall))
909        return EraseInstFromFunction(*OldCall);
910
911      // We cannot remove an invoke, because it would change the CFG, just
912      // change the callee to a null pointer.
913      cast<InvokeInst>(OldCall)->setCalledFunction(
914                                    Constant::getNullValue(CalleeF->getType()));
915      return 0;
916    }
917
918  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
919    // If CS does not return void then replaceAllUsesWith undef.
920    // This allows ValueHandlers and custom metadata to adjust themselves.
921    if (!CS.getInstruction()->getType()->isVoidTy())
922      ReplaceInstUsesWith(*CS.getInstruction(),
923                          UndefValue::get(CS.getInstruction()->getType()));
924
925    if (isa<InvokeInst>(CS.getInstruction())) {
926      // Can't remove an invoke because we cannot change the CFG.
927      return 0;
928    }
929
930    // This instruction is not reachable; just remove it.  We insert a store to
931    // undef so that we know that this code is not reachable, despite the fact
932    // that we can't modify the CFG here.
933    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
934                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
935                  CS.getInstruction());
936
937    return EraseInstFromFunction(*CS.getInstruction());
938  }
939
940  if (IntrinsicInst *II = FindInitTrampoline(Callee))
941    return transformCallThroughTrampoline(CS, II);
942
943  PointerType *PTy = cast<PointerType>(Callee->getType());
944  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
945  if (FTy->isVarArg()) {
946    int ix = FTy->getNumParams();
947    // See if we can optimize any arguments passed through the varargs area of
948    // the call.
949    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
950           E = CS.arg_end(); I != E; ++I, ++ix) {
951      CastInst *CI = dyn_cast<CastInst>(*I);
952      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
953        *I = CI->getOperand(0);
954        Changed = true;
955      }
956    }
957  }
958
959  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
960    // Inline asm calls cannot throw - mark them 'nounwind'.
961    CS.setDoesNotThrow();
962    Changed = true;
963  }
964
965  // Try to optimize the call if possible, we require DataLayout for most of
966  // this.  None of these calls are seen as possibly dead so go ahead and
967  // delete the instruction now.
968  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
969    Instruction *I = tryOptimizeCall(CI, TD);
970    // If we changed something, return the result; otherwise, fall through to
971    // the check below.
972    if (I) return EraseInstFromFunction(*I);
973  }
974
975  return Changed ? CS.getInstruction() : 0;
976}
977
978// transformConstExprCastCall - If the callee is a constexpr cast of a function,
979// attempt to move the cast to the arguments of the call/invoke.
980//
981bool InstCombiner::transformConstExprCastCall(CallSite CS) {
982  Function *Callee =
983    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
984  if (Callee == 0)
985    return false;
986  Instruction *Caller = CS.getInstruction();
987  const AttributeSet &CallerPAL = CS.getAttributes();
988
989  // Okay, this is a cast from a function to a different type.  Unless doing so
990  // would cause a type conversion of one of our arguments, change this call to
991  // be a direct call with arguments casted to the appropriate types.
992  //
993  FunctionType *FT = Callee->getFunctionType();
994  Type *OldRetTy = Caller->getType();
995  Type *NewRetTy = FT->getReturnType();
996
997  if (NewRetTy->isStructTy())
998    return false; // TODO: Handle multiple return values.
999
1000  // Check to see if we are changing the return type...
1001  if (OldRetTy != NewRetTy) {
1002    if (Callee->isDeclaration() &&
1003        // Conversion is ok if changing from one pointer type to another or from
1004        // a pointer to an integer of the same size.
1005        !((OldRetTy->isPointerTy() || !TD ||
1006           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
1007          (NewRetTy->isPointerTy() || !TD ||
1008           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
1009      return false;   // Cannot transform this return value.
1010
1011    if (!Caller->use_empty() &&
1012        // void -> non-void is handled specially
1013        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
1014      return false;   // Cannot transform this return value.
1015
1016    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
1017      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
1018      if (RAttrs.
1019          hasAttributes(AttributeFuncs::
1020                        typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
1021                        AttributeSet::ReturnIndex))
1022        return false;   // Attribute not compatible with transformed value.
1023    }
1024
1025    // If the callsite is an invoke instruction, and the return value is used by
1026    // a PHI node in a successor, we cannot change the return type of the call
1027    // because there is no place to put the cast instruction (without breaking
1028    // the critical edge).  Bail out in this case.
1029    if (!Caller->use_empty())
1030      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
1031        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
1032             UI != E; ++UI)
1033          if (PHINode *PN = dyn_cast<PHINode>(*UI))
1034            if (PN->getParent() == II->getNormalDest() ||
1035                PN->getParent() == II->getUnwindDest())
1036              return false;
1037  }
1038
1039  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
1040  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
1041
1042  CallSite::arg_iterator AI = CS.arg_begin();
1043  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
1044    Type *ParamTy = FT->getParamType(i);
1045    Type *ActTy = (*AI)->getType();
1046
1047    if (!CastInst::isCastable(ActTy, ParamTy))
1048      return false;   // Cannot transform this parameter value.
1049
1050    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
1051          hasAttributes(AttributeFuncs::
1052                        typeIncompatible(ParamTy, i + 1), i + 1))
1053      return false;   // Attribute not compatible with transformed value.
1054
1055    // If the parameter is passed as a byval argument, then we have to have a
1056    // sized type and the sized type has to have the same size as the old type.
1057    if (ParamTy != ActTy &&
1058        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
1059                                                         Attribute::ByVal)) {
1060      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
1061      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
1062        return false;
1063
1064      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
1065      if (TD->getTypeAllocSize(CurElTy) !=
1066          TD->getTypeAllocSize(ParamPTy->getElementType()))
1067        return false;
1068    }
1069
1070    // Converting from one pointer type to another or between a pointer and an
1071    // integer of the same size is safe even if we do not have a body.
1072    bool isConvertible = ActTy == ParamTy ||
1073      (TD && ((ParamTy->isPointerTy() ||
1074      ParamTy == TD->getIntPtrType(Caller->getContext())) &&
1075              (ActTy->isPointerTy() ||
1076              ActTy == TD->getIntPtrType(Caller->getContext()))));
1077    if (Callee->isDeclaration() && !isConvertible) return false;
1078  }
1079
1080  if (Callee->isDeclaration()) {
1081    // Do not delete arguments unless we have a function body.
1082    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
1083      return false;
1084
1085    // If the callee is just a declaration, don't change the varargsness of the
1086    // call.  We don't want to introduce a varargs call where one doesn't
1087    // already exist.
1088    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
1089    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
1090      return false;
1091
1092    // If both the callee and the cast type are varargs, we still have to make
1093    // sure the number of fixed parameters is the same, or we have the same
1094    // ABI issues as if we introduced a varargs call.
1095    if (FT->isVarArg() &&
1096        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
1097        FT->getNumParams() !=
1098        cast<FunctionType>(APTy->getElementType())->getNumParams())
1099      return false;
1100  }
1101
1102  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
1103      !CallerPAL.isEmpty())
1104    // In this case we have more arguments than the new function type, but we
1105    // won't be dropping them.  Check that these extra arguments have attributes
1106    // that are compatible with being a vararg call argument.
1107    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
1108      unsigned Index = CallerPAL.getSlotIndex(i - 1);
1109      if (Index <= FT->getNumParams())
1110        break;
1111
1112      // Check if it has an attribute that's incompatible with varargs.
1113      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
1114      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
1115        return false;
1116    }
1117
1118
1119  // Okay, we decided that this is a safe thing to do: go ahead and start
1120  // inserting cast instructions as necessary.
1121  std::vector<Value*> Args;
1122  Args.reserve(NumActualArgs);
1123  SmallVector<AttributeSet, 8> attrVec;
1124  attrVec.reserve(NumCommonArgs);
1125
1126  // Get any return attributes.
1127  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
1128
1129  // If the return value is not being used, the type may not be compatible
1130  // with the existing attributes.  Wipe out any problematic attributes.
1131  RAttrs.
1132    removeAttributes(AttributeFuncs::
1133                     typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
1134                     AttributeSet::ReturnIndex);
1135
1136  // Add the new return attributes.
1137  if (RAttrs.hasAttributes())
1138    attrVec.push_back(AttributeSet::get(Caller->getContext(),
1139                                        AttributeSet::ReturnIndex, RAttrs));
1140
1141  AI = CS.arg_begin();
1142  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
1143    Type *ParamTy = FT->getParamType(i);
1144    if ((*AI)->getType() == ParamTy) {
1145      Args.push_back(*AI);
1146    } else {
1147      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
1148          false, ParamTy, false);
1149      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
1150    }
1151
1152    // Add any parameter attributes.
1153    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
1154    if (PAttrs.hasAttributes())
1155      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
1156                                          PAttrs));
1157  }
1158
1159  // If the function takes more arguments than the call was taking, add them
1160  // now.
1161  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
1162    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
1163
1164  // If the call passes more arguments than the new function type accepts, the
  // extras are passed through the va_arg area below (varargs callees only).
1165  if (FT->getNumParams() < NumActualArgs) {
1166    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
1167    if (FT->isVarArg()) {
1168      // Add all of the arguments in their promoted form to the arg list.
1169      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
1170        Type *PTy = getPromotedType((*AI)->getType());
1171        if (PTy != (*AI)->getType()) {
1172          // Must promote to pass through va_arg area!
1173          Instruction::CastOps opcode =
1174            CastInst::getCastOpcode(*AI, false, PTy, false);
1175          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
1176        } else {
1177          Args.push_back(*AI);
1178        }
1179
1180        // Add any parameter attributes.
1181        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
1182        if (PAttrs.hasAttributes())
1183          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
1184                                              PAttrs));
1185      }
1186    }
1187  }
1188
1189  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
1190  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
1191    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
1192
1193  if (NewRetTy->isVoidTy())
1194    Caller->setName("");   // Void type should not have a name.
1195
1196  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
1197                                                       attrVec);
1198
1199  Instruction *NC;
1200  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
1201    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
1202                               II->getUnwindDest(), Args);
1203    NC->takeName(II);
1204    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
1205    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
1206  } else {
1207    CallInst *CI = cast<CallInst>(Caller);
1208    NC = Builder->CreateCall(Callee, Args);
1209    NC->takeName(CI);
1210    if (CI->isTailCall())
1211      cast<CallInst>(NC)->setTailCall();
1212    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
1213    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
1214  }
1215
1216  // Insert a cast of the return type as necessary.
1217  Value *NV = NC;
1218  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
1219    if (!NV->getType()->isVoidTy()) {
1220      Instruction::CastOps opcode =
1221        CastInst::getCastOpcode(NC, false, OldRetTy, false);
1222      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
1223      NC->setDebugLoc(Caller->getDebugLoc());
1224
1225      // If this is an invoke instruction, we should insert it after the first
1226      // non-phi instruction in the normal successor block.
1227      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
1228        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
1229        InsertNewInstBefore(NC, *I);
1230      } else {
1231        // Otherwise, it's a call; just insert the cast right after the call.
1232        InsertNewInstBefore(NC, *Caller);
1233      }
1234      Worklist.AddUsersToWorkList(*Caller);
1235    } else {
1236      NV = UndefValue::get(Caller->getType());
1237    }
1238  }
1239
1240  if (!Caller->use_empty())
1241    ReplaceInstUsesWith(*Caller, NV);
1242
1243  EraseInstFromFunction(*Caller);
1244  return true;
1245}
1246
1247// transformCallThroughTrampoline - Turn a call to a function created by
1248// the init_trampoline / adjust_trampoline intrinsic pair into a direct call
1249// to the underlying function.
1250//
1251Instruction *
1252InstCombiner::transformCallThroughTrampoline(CallSite CS,
1253                                             IntrinsicInst *Tramp) {
1254  Value *Callee = CS.getCalledValue();
1255  PointerType *PTy = cast<PointerType>(Callee->getType());
1256  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
1257  const AttributeSet &Attrs = CS.getAttributes();
1258
1259  // If the call already has the 'nest' attribute somewhere then give up -
1260  // otherwise 'nest' would occur twice after splicing in the chain.
1261  if (Attrs.hasAttrSomewhere(Attribute::Nest))
1262    return 0;
1263
1264  assert(Tramp &&
1265         "transformCallThroughTrampoline called with incorrect CallSite.");
1266
1267  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
1268  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
1269  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
1270
1271  const AttributeSet &NestAttrs = NestF->getAttributes();
1272  if (!NestAttrs.isEmpty()) {
1273    unsigned NestIdx = 1;
1274    Type *NestTy = 0;
1275    AttributeSet NestAttr;
1276
1277    // Look for a parameter marked with the 'nest' attribute.
1278    for (FunctionType::param_iterator I = NestFTy->param_begin(),
1279         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
1280      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
1281        // Record the parameter type and any other attributes.
1282        NestTy = *I;
1283        NestAttr = NestAttrs.getParamAttributes(NestIdx);
1284        break;
1285      }
1286
1287    if (NestTy) {
1288      Instruction *Caller = CS.getInstruction();
1289      std::vector<Value*> NewArgs;
1290      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
1291
1292      SmallVector<AttributeSet, 8> NewAttrs;
1293      NewAttrs.reserve(Attrs.getNumSlots() + 1);
1294
1295      // Insert the nest argument into the call argument list, which may
1296      // mean appending it.  Likewise for attributes.
1297
1298      // Add any result attributes.
1299      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
1300        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
1301                                             Attrs.getRetAttributes()));
1302
1303      {
1304        unsigned Idx = 1;
1305        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
1306        do {
1307          if (Idx == NestIdx) {
1308            // Add the chain argument and attributes.
1309            Value *NestVal = Tramp->getArgOperand(2);
1310            if (NestVal->getType() != NestTy)
1311              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
1312            NewArgs.push_back(NestVal);
1313            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
1314                                                 NestAttr));
1315          }
1316
1317          if (I == E)
1318            break;
1319
1320          // Add the original argument and attributes.
1321          NewArgs.push_back(*I);
1322          AttributeSet Attr = Attrs.getParamAttributes(Idx);
1323          if (Attr.hasAttributes(Idx)) {
1324            AttrBuilder B(Attr, Idx);
1325            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
1326                                                 Idx + (Idx >= NestIdx), B));
1327          }
1328
1329          ++Idx; ++I;
1330        } while (1);
1331      }
1332
1333      // Add any function attributes.
1334      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
1335        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
1336                                             Attrs.getFnAttributes()));
1337
1338      // The trampoline may have been bitcast to a bogus type (FTy).
1339      // Handle this by synthesizing a new function type, equal to FTy
1340      // with the chain parameter inserted.
1341
1342      std::vector<Type*> NewTypes;
1343      NewTypes.reserve(FTy->getNumParams()+1);
1344
1345      // Insert the chain's type into the list of parameter types, which may
1346      // mean appending it.
1347      {
1348        unsigned Idx = 1;
1349        FunctionType::param_iterator I = FTy->param_begin(),
1350          E = FTy->param_end();
1351
1352        do {
1353          if (Idx == NestIdx)
1354            // Add the chain's type.
1355            NewTypes.push_back(NestTy);
1356
1357          if (I == E)
1358            break;
1359
1360          // Add the original type.
1361          NewTypes.push_back(*I);
1362
1363          ++Idx; ++I;
1364        } while (1);
1365      }
1366
1367      // Replace the trampoline call with a direct call.  Let the generic
1368      // code sort out any function type mismatches.
1369      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
1370                                                FTy->isVarArg());
1371      Constant *NewCallee =
1372        NestF->getType() == PointerType::getUnqual(NewFTy) ?
1373        NestF : ConstantExpr::getBitCast(NestF,
1374                                         PointerType::getUnqual(NewFTy));
1375      const AttributeSet &NewPAL =
1376          AttributeSet::get(FTy->getContext(), NewAttrs);
1377
1378      Instruction *NewCaller;
1379      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
1380        NewCaller = InvokeInst::Create(NewCallee,
1381                                       II->getNormalDest(), II->getUnwindDest(),
1382                                       NewArgs);
1383        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
1384        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
1385      } else {
1386        NewCaller = CallInst::Create(NewCallee, NewArgs);
1387        if (cast<CallInst>(Caller)->isTailCall())
1388          cast<CallInst>(NewCaller)->setTailCall();
1389        cast<CallInst>(NewCaller)->
1390          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
1391        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
1392      }
1393
1394      return NewCaller;
1395    }
1396  }
1397
1398  // Replace the trampoline call with a direct call.  Since there is no 'nest'
1399  // parameter, there is no need to adjust the argument list.  Let the generic
1400  // code sort out any function type mismatches.
1401  Constant *NewCallee =
1402    NestF->getType() == PTy ? NestF :
1403                              ConstantExpr::getBitCast(NestF, PTy);
1404  CS.setCalledFunction(NewCallee);
1405  return CS.getInstruction();
1406}
1407