1//===- InstCombineCalls.cpp -----------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the visitCall and visitInvoke functions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/ADT/Statistic.h"
16#include "llvm/Analysis/InstructionSimplify.h"
17#include "llvm/Analysis/Loads.h"
18#include "llvm/Analysis/MemoryBuiltins.h"
19#include "llvm/IR/CallSite.h"
20#include "llvm/IR/Dominators.h"
21#include "llvm/IR/PatternMatch.h"
22#include "llvm/IR/Statepoint.h"
23#include "llvm/Transforms/Utils/BuildLibCalls.h"
24#include "llvm/Transforms/Utils/Local.h"
25#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
26using namespace llvm;
27using namespace PatternMatch;
28
29#define DEBUG_TYPE "instcombine"
30
31STATISTIC(NumSimplified, "Number of library calls simplified");
32
33/// Return the specified type promoted as it would be to pass though a va_arg
34/// area.
35static Type *getPromotedType(Type *Ty) {
36  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
37    if (ITy->getBitWidth() < 32)
38      return Type::getInt32Ty(Ty->getContext());
39  }
40  return Ty;
41}
42
43/// Given an aggregate type which ultimately holds a single scalar element,
44/// like {{{type}}} or [1 x type], return type.
45static Type *reduceToSingleValueType(Type *T) {
46  while (!T->isSingleValueType()) {
47    if (StructType *STy = dyn_cast<StructType>(T)) {
48      if (STy->getNumElements() == 1)
49        T = STy->getElementType(0);
50      else
51        break;
52    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
53      if (ATy->getNumElements() == 1)
54        T = ATy->getElementType();
55      else
56        break;
57    } else
58      break;
59  }
60
61  return T;
62}
63
64/// Return a constant boolean vector that has true elements in all positions
65/// where the input constant data vector has an element with the sign bit set.
66static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
67  SmallVector<Constant *, 32> BoolVec;
68  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
69  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
70    Constant *Elt = V->getElementAsConstant(I);
71    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
72           "Unexpected constant data vector element type");
73    bool Sign = V->getElementType()->isIntegerTy()
74                    ? cast<ConstantInt>(Elt)->isNegative()
75                    : cast<ConstantFP>(Elt)->isNegative();
76    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
77  }
78  return ConstantVector::get(BoolVec);
79}
80
81Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
82  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
83  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
84  unsigned MinAlign = std::min(DstAlign, SrcAlign);
85  unsigned CopyAlign = MI->getAlignment();
86
87  if (CopyAlign < MinAlign) {
88    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
89    return MI;
90  }
91
92  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
93  // load/store.
94  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
95  if (!MemOpLength) return nullptr;
96
97  // Source and destination pointer types are always "i8*" for intrinsic.  See
98  // if the size is something we can handle with a single primitive load/store.
99  // A single load+store correctly handles overlapping memory in the memmove
100  // case.
101  uint64_t Size = MemOpLength->getLimitedValue();
102  assert(Size && "0-sized memory transferring should be removed already.");
103
104  if (Size > 8 || (Size&(Size-1)))
105    return nullptr;  // If not 1/2/4/8 bytes, exit.
106
107  // Use an integer load+store unless we can find something better.
108  unsigned SrcAddrSp =
109    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
110  unsigned DstAddrSp =
111    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
112
113  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
114  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
115  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
116
117  // Memcpy forces the use of i8* for the source and destination.  That means
118  // that if you're using memcpy to move one double around, you'll get a cast
119  // from double* to i8*.  We'd much rather use a double load+store rather than
120  // an i64 load+store, here because this improves the odds that the source or
121  // dest address will be promotable.  See if we can find a better type than the
122  // integer datatype.
123  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
124  MDNode *CopyMD = nullptr;
125  if (StrippedDest != MI->getArgOperand(0)) {
126    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
127                                    ->getElementType();
128    if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
129      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
130      // down through these levels if so.
131      SrcETy = reduceToSingleValueType(SrcETy);
132
133      if (SrcETy->isSingleValueType()) {
134        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
135        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
136
137        // If the memcpy has metadata describing the members, see if we can
138        // get the TBAA tag describing our copy.
139        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
140          if (M->getNumOperands() == 3 && M->getOperand(0) &&
141              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
142              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
143              M->getOperand(1) &&
144              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
145              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
146                  Size &&
147              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
148            CopyMD = cast<MDNode>(M->getOperand(2));
149        }
150      }
151    }
152  }
153
154  // If the memcpy/memmove provides better alignment info than we can
155  // infer, use it.
156  SrcAlign = std::max(SrcAlign, CopyAlign);
157  DstAlign = std::max(DstAlign, CopyAlign);
158
159  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
160  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
161  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
162  L->setAlignment(SrcAlign);
163  if (CopyMD)
164    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
165  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
166  S->setAlignment(DstAlign);
167  if (CopyMD)
168    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
169
170  // Set the size of the copy to 0, it will be deleted on the next iteration.
171  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
172  return MI;
173}
174
175Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
176  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
177  if (MI->getAlignment() < Alignment) {
178    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
179                                             Alignment, false));
180    return MI;
181  }
182
183  // Extract the length and alignment and fill if they are constant.
184  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
185  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
186  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
187    return nullptr;
188  uint64_t Len = LenC->getLimitedValue();
189  Alignment = MI->getAlignment();
190  assert(Len && "0-sized memory setting should be removed already.");
191
192  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
193  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
194    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
195
196    Value *Dest = MI->getDest();
197    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
198    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
199    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
200
201    // Alignment 0 is identity for alignment 1 for memset, but not store.
202    if (Alignment == 0) Alignment = 1;
203
204    // Extract the fill value and store.
205    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
206    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
207                                        MI->isVolatile());
208    S->setAlignment(Alignment);
209
210    // Set the size of the copy to 0, it will be deleted on the next iteration.
211    MI->setLength(Constant::getNullValue(LenC->getType()));
212    return MI;
213  }
214
215  return nullptr;
216}
217
218static Value *simplifyX86immShift(const IntrinsicInst &II,
219                                  InstCombiner::BuilderTy &Builder) {
220  bool LogicalShift = false;
221  bool ShiftLeft = false;
222
223  switch (II.getIntrinsicID()) {
224  default:
225    return nullptr;
226  case Intrinsic::x86_sse2_psra_d:
227  case Intrinsic::x86_sse2_psra_w:
228  case Intrinsic::x86_sse2_psrai_d:
229  case Intrinsic::x86_sse2_psrai_w:
230  case Intrinsic::x86_avx2_psra_d:
231  case Intrinsic::x86_avx2_psra_w:
232  case Intrinsic::x86_avx2_psrai_d:
233  case Intrinsic::x86_avx2_psrai_w:
234    LogicalShift = false; ShiftLeft = false;
235    break;
236  case Intrinsic::x86_sse2_psrl_d:
237  case Intrinsic::x86_sse2_psrl_q:
238  case Intrinsic::x86_sse2_psrl_w:
239  case Intrinsic::x86_sse2_psrli_d:
240  case Intrinsic::x86_sse2_psrli_q:
241  case Intrinsic::x86_sse2_psrli_w:
242  case Intrinsic::x86_avx2_psrl_d:
243  case Intrinsic::x86_avx2_psrl_q:
244  case Intrinsic::x86_avx2_psrl_w:
245  case Intrinsic::x86_avx2_psrli_d:
246  case Intrinsic::x86_avx2_psrli_q:
247  case Intrinsic::x86_avx2_psrli_w:
248    LogicalShift = true; ShiftLeft = false;
249    break;
250  case Intrinsic::x86_sse2_psll_d:
251  case Intrinsic::x86_sse2_psll_q:
252  case Intrinsic::x86_sse2_psll_w:
253  case Intrinsic::x86_sse2_pslli_d:
254  case Intrinsic::x86_sse2_pslli_q:
255  case Intrinsic::x86_sse2_pslli_w:
256  case Intrinsic::x86_avx2_psll_d:
257  case Intrinsic::x86_avx2_psll_q:
258  case Intrinsic::x86_avx2_psll_w:
259  case Intrinsic::x86_avx2_pslli_d:
260  case Intrinsic::x86_avx2_pslli_q:
261  case Intrinsic::x86_avx2_pslli_w:
262    LogicalShift = true; ShiftLeft = true;
263    break;
264  }
265  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
266
267  // Simplify if count is constant.
268  auto Arg1 = II.getArgOperand(1);
269  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
270  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
271  auto CInt = dyn_cast<ConstantInt>(Arg1);
272  if (!CAZ && !CDV && !CInt)
273    return nullptr;
274
275  APInt Count(64, 0);
276  if (CDV) {
277    // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
278    // operand to compute the shift amount.
279    auto VT = cast<VectorType>(CDV->getType());
280    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
281    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
282    unsigned NumSubElts = 64 / BitWidth;
283
284    // Concatenate the sub-elements to create the 64-bit value.
285    for (unsigned i = 0; i != NumSubElts; ++i) {
286      unsigned SubEltIdx = (NumSubElts - 1) - i;
287      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
288      Count = Count.shl(BitWidth);
289      Count |= SubElt->getValue().zextOrTrunc(64);
290    }
291  }
292  else if (CInt)
293    Count = CInt->getValue();
294
295  auto Vec = II.getArgOperand(0);
296  auto VT = cast<VectorType>(Vec->getType());
297  auto SVT = VT->getElementType();
298  unsigned VWidth = VT->getNumElements();
299  unsigned BitWidth = SVT->getPrimitiveSizeInBits();
300
301  // If shift-by-zero then just return the original value.
302  if (Count == 0)
303    return Vec;
304
305  // Handle cases when Shift >= BitWidth.
306  if (Count.uge(BitWidth)) {
307    // If LogicalShift - just return zero.
308    if (LogicalShift)
309      return ConstantAggregateZero::get(VT);
310
311    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
312    Count = APInt(64, BitWidth - 1);
313  }
314
315  // Get a constant vector of the same type as the first operand.
316  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
317  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
318
319  if (ShiftLeft)
320    return Builder.CreateShl(Vec, ShiftVec);
321
322  if (LogicalShift)
323    return Builder.CreateLShr(Vec, ShiftVec);
324
325  return Builder.CreateAShr(Vec, ShiftVec);
326}
327
328// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
329// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
330// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
331static Value *simplifyX86varShift(const IntrinsicInst &II,
332                                  InstCombiner::BuilderTy &Builder) {
333  bool LogicalShift = false;
334  bool ShiftLeft = false;
335
336  switch (II.getIntrinsicID()) {
337  default:
338    return nullptr;
339  case Intrinsic::x86_avx2_psrav_d:
340  case Intrinsic::x86_avx2_psrav_d_256:
341    LogicalShift = false;
342    ShiftLeft = false;
343    break;
344  case Intrinsic::x86_avx2_psrlv_d:
345  case Intrinsic::x86_avx2_psrlv_d_256:
346  case Intrinsic::x86_avx2_psrlv_q:
347  case Intrinsic::x86_avx2_psrlv_q_256:
348    LogicalShift = true;
349    ShiftLeft = false;
350    break;
351  case Intrinsic::x86_avx2_psllv_d:
352  case Intrinsic::x86_avx2_psllv_d_256:
353  case Intrinsic::x86_avx2_psllv_q:
354  case Intrinsic::x86_avx2_psllv_q_256:
355    LogicalShift = true;
356    ShiftLeft = true;
357    break;
358  }
359  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
360
361  // Simplify if all shift amounts are constant/undef.
362  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
363  if (!CShift)
364    return nullptr;
365
366  auto Vec = II.getArgOperand(0);
367  auto VT = cast<VectorType>(II.getType());
368  auto SVT = VT->getVectorElementType();
369  int NumElts = VT->getNumElements();
370  int BitWidth = SVT->getIntegerBitWidth();
371
372  // Collect each element's shift amount.
373  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
374  bool AnyOutOfRange = false;
375  SmallVector<int, 8> ShiftAmts;
376  for (int I = 0; I < NumElts; ++I) {
377    auto *CElt = CShift->getAggregateElement(I);
378    if (CElt && isa<UndefValue>(CElt)) {
379      ShiftAmts.push_back(-1);
380      continue;
381    }
382
383    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
384    if (!COp)
385      return nullptr;
386
387    // Handle out of range shifts.
388    // If LogicalShift - set to BitWidth (special case).
389    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
390    APInt ShiftVal = COp->getValue();
391    if (ShiftVal.uge(BitWidth)) {
392      AnyOutOfRange = LogicalShift;
393      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
394      continue;
395    }
396
397    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
398  }
399
400  // If all elements out of range or UNDEF, return vector of zeros/undefs.
401  // ArithmeticShift should only hit this if they are all UNDEF.
402  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
403  if (llvm::all_of(ShiftAmts, OutOfRange)) {
404    SmallVector<Constant *, 8> ConstantVec;
405    for (int Idx : ShiftAmts) {
406      if (Idx < 0) {
407        ConstantVec.push_back(UndefValue::get(SVT));
408      } else {
409        assert(LogicalShift && "Logical shift expected");
410        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
411      }
412    }
413    return ConstantVector::get(ConstantVec);
414  }
415
416  // We can't handle only some out of range values with generic logical shifts.
417  if (AnyOutOfRange)
418    return nullptr;
419
420  // Build the shift amount constant vector.
421  SmallVector<Constant *, 8> ShiftVecAmts;
422  for (int Idx : ShiftAmts) {
423    if (Idx < 0)
424      ShiftVecAmts.push_back(UndefValue::get(SVT));
425    else
426      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
427  }
428  auto ShiftVec = ConstantVector::get(ShiftVecAmts);
429
430  if (ShiftLeft)
431    return Builder.CreateShl(Vec, ShiftVec);
432
433  if (LogicalShift)
434    return Builder.CreateLShr(Vec, ShiftVec);
435
436  return Builder.CreateAShr(Vec, ShiftVec);
437}
438
439static Value *simplifyX86movmsk(const IntrinsicInst &II,
440                                InstCombiner::BuilderTy &Builder) {
441  Value *Arg = II.getArgOperand(0);
442  Type *ResTy = II.getType();
443  Type *ArgTy = Arg->getType();
444
445  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
446  if (isa<UndefValue>(Arg))
447    return Constant::getNullValue(ResTy);
448
449  // We can't easily peek through x86_mmx types.
450  if (!ArgTy->isVectorTy())
451    return nullptr;
452
453  auto *C = dyn_cast<Constant>(Arg);
454  if (!C)
455    return nullptr;
456
457  // Extract signbits of the vector input and pack into integer result.
458  APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
459  for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
460    auto *COp = C->getAggregateElement(I);
461    if (!COp)
462      return nullptr;
463    if (isa<UndefValue>(COp))
464      continue;
465
466    auto *CInt = dyn_cast<ConstantInt>(COp);
467    auto *CFp = dyn_cast<ConstantFP>(COp);
468    if (!CInt && !CFp)
469      return nullptr;
470
471    if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
472      Result.setBit(I);
473  }
474
475  return Constant::getIntegerValue(ResTy, Result);
476}
477
478static Value *simplifyX86insertps(const IntrinsicInst &II,
479                                  InstCombiner::BuilderTy &Builder) {
480  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
481  if (!CInt)
482    return nullptr;
483
484  VectorType *VecTy = cast<VectorType>(II.getType());
485  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
486
487  // The immediate permute control byte looks like this:
488  //    [3:0] - zero mask for each 32-bit lane
489  //    [5:4] - select one 32-bit destination lane
490  //    [7:6] - select one 32-bit source lane
491
492  uint8_t Imm = CInt->getZExtValue();
493  uint8_t ZMask = Imm & 0xf;
494  uint8_t DestLane = (Imm >> 4) & 0x3;
495  uint8_t SourceLane = (Imm >> 6) & 0x3;
496
497  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
498
499  // If all zero mask bits are set, this was just a weird way to
500  // generate a zero vector.
501  if (ZMask == 0xf)
502    return ZeroVector;
503
504  // Initialize by passing all of the first source bits through.
505  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
506
507  // We may replace the second operand with the zero vector.
508  Value *V1 = II.getArgOperand(1);
509
510  if (ZMask) {
511    // If the zero mask is being used with a single input or the zero mask
512    // overrides the destination lane, this is a shuffle with the zero vector.
513    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
514        (ZMask & (1 << DestLane))) {
515      V1 = ZeroVector;
516      // We may still move 32-bits of the first source vector from one lane
517      // to another.
518      ShuffleMask[DestLane] = SourceLane;
519      // The zero mask may override the previous insert operation.
520      for (unsigned i = 0; i < 4; ++i)
521        if ((ZMask >> i) & 0x1)
522          ShuffleMask[i] = i + 4;
523    } else {
524      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
525      return nullptr;
526    }
527  } else {
528    // Replace the selected destination lane with the selected source lane.
529    ShuffleMask[DestLane] = SourceLane + 4;
530  }
531
532  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
533}
534
535/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
536/// or conversion to a shuffle vector.
537static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
538                               ConstantInt *CILength, ConstantInt *CIIndex,
539                               InstCombiner::BuilderTy &Builder) {
540  auto LowConstantHighUndef = [&](uint64_t Val) {
541    Type *IntTy64 = Type::getInt64Ty(II.getContext());
542    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
543                        UndefValue::get(IntTy64)};
544    return ConstantVector::get(Args);
545  };
546
547  // See if we're dealing with constant values.
548  Constant *C0 = dyn_cast<Constant>(Op0);
549  ConstantInt *CI0 =
550      C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
551         : nullptr;
552
553  // Attempt to constant fold.
554  if (CILength && CIIndex) {
555    // From AMD documentation: "The bit index and field length are each six
556    // bits in length other bits of the field are ignored."
557    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
558    APInt APLength = CILength->getValue().zextOrTrunc(6);
559
560    unsigned Index = APIndex.getZExtValue();
561
562    // From AMD documentation: "a value of zero in the field length is
563    // defined as length of 64".
564    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
565
566    // From AMD documentation: "If the sum of the bit index + length field
567    // is greater than 64, the results are undefined".
568    unsigned End = Index + Length;
569
570    // Note that both field index and field length are 8-bit quantities.
571    // Since variables 'Index' and 'Length' are unsigned values
572    // obtained from zero-extending field index and field length
573    // respectively, their sum should never wrap around.
574    if (End > 64)
575      return UndefValue::get(II.getType());
576
577    // If we are inserting whole bytes, we can convert this to a shuffle.
578    // Lowering can recognize EXTRQI shuffle masks.
579    if ((Length % 8) == 0 && (Index % 8) == 0) {
580      // Convert bit indices to byte indices.
581      Length /= 8;
582      Index /= 8;
583
584      Type *IntTy8 = Type::getInt8Ty(II.getContext());
585      Type *IntTy32 = Type::getInt32Ty(II.getContext());
586      VectorType *ShufTy = VectorType::get(IntTy8, 16);
587
588      SmallVector<Constant *, 16> ShuffleMask;
589      for (int i = 0; i != (int)Length; ++i)
590        ShuffleMask.push_back(
591            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
592      for (int i = Length; i != 8; ++i)
593        ShuffleMask.push_back(
594            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
595      for (int i = 8; i != 16; ++i)
596        ShuffleMask.push_back(UndefValue::get(IntTy32));
597
598      Value *SV = Builder.CreateShuffleVector(
599          Builder.CreateBitCast(Op0, ShufTy),
600          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
601      return Builder.CreateBitCast(SV, II.getType());
602    }
603
604    // Constant Fold - shift Index'th bit to lowest position and mask off
605    // Length bits.
606    if (CI0) {
607      APInt Elt = CI0->getValue();
608      Elt = Elt.lshr(Index).zextOrTrunc(Length);
609      return LowConstantHighUndef(Elt.getZExtValue());
610    }
611
612    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
613    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
614      Value *Args[] = {Op0, CILength, CIIndex};
615      Module *M = II.getModule();
616      Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
617      return Builder.CreateCall(F, Args);
618    }
619  }
620
621  // Constant Fold - extraction from zero is always {zero, undef}.
622  if (CI0 && CI0->equalsInt(0))
623    return LowConstantHighUndef(0);
624
625  return nullptr;
626}
627
628/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
629/// folding or conversion to a shuffle vector.
630static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
631                                 APInt APLength, APInt APIndex,
632                                 InstCombiner::BuilderTy &Builder) {
633
634  // From AMD documentation: "The bit index and field length are each six bits
635  // in length other bits of the field are ignored."
636  APIndex = APIndex.zextOrTrunc(6);
637  APLength = APLength.zextOrTrunc(6);
638
639  // Attempt to constant fold.
640  unsigned Index = APIndex.getZExtValue();
641
642  // From AMD documentation: "a value of zero in the field length is
643  // defined as length of 64".
644  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
645
646  // From AMD documentation: "If the sum of the bit index + length field
647  // is greater than 64, the results are undefined".
648  unsigned End = Index + Length;
649
650  // Note that both field index and field length are 8-bit quantities.
651  // Since variables 'Index' and 'Length' are unsigned values
652  // obtained from zero-extending field index and field length
653  // respectively, their sum should never wrap around.
654  if (End > 64)
655    return UndefValue::get(II.getType());
656
657  // If we are inserting whole bytes, we can convert this to a shuffle.
658  // Lowering can recognize INSERTQI shuffle masks.
659  if ((Length % 8) == 0 && (Index % 8) == 0) {
660    // Convert bit indices to byte indices.
661    Length /= 8;
662    Index /= 8;
663
664    Type *IntTy8 = Type::getInt8Ty(II.getContext());
665    Type *IntTy32 = Type::getInt32Ty(II.getContext());
666    VectorType *ShufTy = VectorType::get(IntTy8, 16);
667
668    SmallVector<Constant *, 16> ShuffleMask;
669    for (int i = 0; i != (int)Index; ++i)
670      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
671    for (int i = 0; i != (int)Length; ++i)
672      ShuffleMask.push_back(
673          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
674    for (int i = Index + Length; i != 8; ++i)
675      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
676    for (int i = 8; i != 16; ++i)
677      ShuffleMask.push_back(UndefValue::get(IntTy32));
678
679    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
680                                            Builder.CreateBitCast(Op1, ShufTy),
681                                            ConstantVector::get(ShuffleMask));
682    return Builder.CreateBitCast(SV, II.getType());
683  }
684
685  // See if we're dealing with constant values.
686  Constant *C0 = dyn_cast<Constant>(Op0);
687  Constant *C1 = dyn_cast<Constant>(Op1);
688  ConstantInt *CI00 =
689      C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
690         : nullptr;
691  ConstantInt *CI10 =
692      C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
693         : nullptr;
694
695  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
696  if (CI00 && CI10) {
697    APInt V00 = CI00->getValue();
698    APInt V10 = CI10->getValue();
699    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
700    V00 = V00 & ~Mask;
701    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
702    APInt Val = V00 | V10;
703    Type *IntTy64 = Type::getInt64Ty(II.getContext());
704    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
705                        UndefValue::get(IntTy64)};
706    return ConstantVector::get(Args);
707  }
708
709  // If we were an INSERTQ call, we'll save demanded elements if we convert to
710  // INSERTQI.
711  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
712    Type *IntTy8 = Type::getInt8Ty(II.getContext());
713    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
714    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
715
716    Value *Args[] = {Op0, Op1, CILength, CIIndex};
717    Module *M = II.getModule();
718    Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
719    return Builder.CreateCall(F, Args);
720  }
721
722  return nullptr;
723}
724
725/// Attempt to convert pshufb* to shufflevector if the mask is constant.
726static Value *simplifyX86pshufb(const IntrinsicInst &II,
727                                InstCombiner::BuilderTy &Builder) {
728  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
729  if (!V)
730    return nullptr;
731
732  auto *VecTy = cast<VectorType>(II.getType());
733  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
734  unsigned NumElts = VecTy->getNumElements();
735  assert((NumElts == 16 || NumElts == 32) &&
736         "Unexpected number of elements in shuffle mask!");
737
738  // Construct a shuffle mask from constant integers or UNDEFs.
739  Constant *Indexes[32] = {NULL};
740
741  // Each byte in the shuffle control mask forms an index to permute the
742  // corresponding byte in the destination operand.
743  for (unsigned I = 0; I < NumElts; ++I) {
744    Constant *COp = V->getAggregateElement(I);
745    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
746      return nullptr;
747
748    if (isa<UndefValue>(COp)) {
749      Indexes[I] = UndefValue::get(MaskEltTy);
750      continue;
751    }
752
753    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
754
755    // If the most significant bit (bit[7]) of each byte of the shuffle
756    // control mask is set, then zero is written in the result byte.
757    // The zero vector is in the right-hand side of the resulting
758    // shufflevector.
759
760    // The value of each index for the high 128-bit lane is the least
761    // significant 4 bits of the respective shuffle control byte.
762    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
763    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
764  }
765
766  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
767  auto V1 = II.getArgOperand(0);
768  auto V2 = Constant::getNullValue(VecTy);
769  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
770}
771
772/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
773static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
774                                    InstCombiner::BuilderTy &Builder) {
775  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
776  if (!V)
777    return nullptr;
778
779  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
780  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
781  assert(NumElts == 8 || NumElts == 4 || NumElts == 2);
782
783  // Construct a shuffle mask from constant integers or UNDEFs.
784  Constant *Indexes[8] = {NULL};
785
786  // The intrinsics only read one or two bits, clear the rest.
787  for (unsigned I = 0; I < NumElts; ++I) {
788    Constant *COp = V->getAggregateElement(I);
789    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
790      return nullptr;
791
792    if (isa<UndefValue>(COp)) {
793      Indexes[I] = UndefValue::get(MaskEltTy);
794      continue;
795    }
796
797    APInt Index = cast<ConstantInt>(COp)->getValue();
798    Index = Index.zextOrTrunc(32).getLoBits(2);
799
800    // The PD variants uses bit 1 to select per-lane element index, so
801    // shift down to convert to generic shuffle mask index.
802    if (II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
803        II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
804      Index = Index.lshr(1);
805
806    // The _256 variants are a bit trickier since the mask bits always index
807    // into the corresponding 128 half. In order to convert to a generic
808    // shuffle, we have to make that explicit.
809    if ((II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
810         II.getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) &&
811        ((NumElts / 2) <= I)) {
812      Index += APInt(32, NumElts / 2);
813    }
814
815    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
816  }
817
818  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
819  auto V1 = II.getArgOperand(0);
820  auto V2 = UndefValue::get(V1->getType());
821  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
822}
823
824/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
825static Value *simplifyX86vpermv(const IntrinsicInst &II,
826                                InstCombiner::BuilderTy &Builder) {
827  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
828  if (!V)
829    return nullptr;
830
831  auto *VecTy = cast<VectorType>(II.getType());
832  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
833  unsigned Size = VecTy->getNumElements();
834  assert(Size == 8 && "Unexpected shuffle mask size");
835
836  // Construct a shuffle mask from constant integers or UNDEFs.
837  Constant *Indexes[8] = {NULL};
838
839  for (unsigned I = 0; I < Size; ++I) {
840    Constant *COp = V->getAggregateElement(I);
841    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
842      return nullptr;
843
844    if (isa<UndefValue>(COp)) {
845      Indexes[I] = UndefValue::get(MaskEltTy);
846      continue;
847    }
848
849    APInt Index = cast<ConstantInt>(COp)->getValue();
850    Index = Index.zextOrTrunc(32).getLoBits(3);
851    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
852  }
853
854  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
855  auto V1 = II.getArgOperand(0);
856  auto V2 = UndefValue::get(VecTy);
857  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
858}
859
860/// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
861/// source vectors, unless a zero bit is set. If a zero bit is set,
862/// then ignore that half of the mask and clear that half of the vector.
863static Value *simplifyX86vperm2(const IntrinsicInst &II,
864                                InstCombiner::BuilderTy &Builder) {
865  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
866  if (!CInt)
867    return nullptr;
868
869  VectorType *VecTy = cast<VectorType>(II.getType());
870  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
871
872  // The immediate permute control byte looks like this:
873  //    [1:0] - select 128 bits from sources for low half of destination
874  //    [2]   - ignore
875  //    [3]   - zero low half of destination
876  //    [5:4] - select 128 bits from sources for high half of destination
877  //    [6]   - ignore
878  //    [7]   - zero high half of destination
879
880  uint8_t Imm = CInt->getZExtValue();
881
882  bool LowHalfZero = Imm & 0x08;
883  bool HighHalfZero = Imm & 0x80;
884
885  // If both zero mask bits are set, this was just a weird way to
886  // generate a zero vector.
887  if (LowHalfZero && HighHalfZero)
888    return ZeroVector;
889
890  // If 0 or 1 zero mask bits are set, this is a simple shuffle.
891  unsigned NumElts = VecTy->getNumElements();
892  unsigned HalfSize = NumElts / 2;
893  SmallVector<uint32_t, 8> ShuffleMask(NumElts);
894
895  // The high bit of the selection field chooses the 1st or 2nd operand.
896  bool LowInputSelect = Imm & 0x02;
897  bool HighInputSelect = Imm & 0x20;
898
899  // The low bit of the selection field chooses the low or high half
900  // of the selected operand.
901  bool LowHalfSelect = Imm & 0x01;
902  bool HighHalfSelect = Imm & 0x10;
903
904  // Determine which operand(s) are actually in use for this instruction.
905  Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
906  Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
907
908  // If needed, replace operands based on zero mask.
909  V0 = LowHalfZero ? ZeroVector : V0;
910  V1 = HighHalfZero ? ZeroVector : V1;
911
912  // Permute low half of result.
913  unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
914  for (unsigned i = 0; i < HalfSize; ++i)
915    ShuffleMask[i] = StartIndex + i;
916
917  // Permute high half of result.
918  StartIndex = HighHalfSelect ? HalfSize : 0;
919  StartIndex += NumElts;
920  for (unsigned i = 0; i < HalfSize; ++i)
921    ShuffleMask[i + HalfSize] = StartIndex + i;
922
923  return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
924}
925
926/// Decode XOP integer vector comparison intrinsics.
927static Value *simplifyX86vpcom(const IntrinsicInst &II,
928                               InstCombiner::BuilderTy &Builder,
929                               bool IsSigned) {
930  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
931    uint64_t Imm = CInt->getZExtValue() & 0x7;
932    VectorType *VecTy = cast<VectorType>(II.getType());
933    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
934
935    switch (Imm) {
936    case 0x0:
937      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
938      break;
939    case 0x1:
940      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
941      break;
942    case 0x2:
943      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
944      break;
945    case 0x3:
946      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
947      break;
948    case 0x4:
949      Pred = ICmpInst::ICMP_EQ; break;
950    case 0x5:
951      Pred = ICmpInst::ICMP_NE; break;
952    case 0x6:
953      return ConstantInt::getSigned(VecTy, 0); // FALSE
954    case 0x7:
955      return ConstantInt::getSigned(VecTy, -1); // TRUE
956    }
957
958    if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
959                                        II.getArgOperand(1)))
960      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
961  }
962  return nullptr;
963}
964
965static Value *simplifyMinnumMaxnum(const IntrinsicInst &II) {
966  Value *Arg0 = II.getArgOperand(0);
967  Value *Arg1 = II.getArgOperand(1);
968
969  // fmin(x, x) -> x
970  if (Arg0 == Arg1)
971    return Arg0;
972
973  const auto *C1 = dyn_cast<ConstantFP>(Arg1);
974
975  // fmin(x, nan) -> x
976  if (C1 && C1->isNaN())
977    return Arg0;
978
979  // This is the value because if undef were NaN, we would return the other
980  // value and cannot return a NaN unless both operands are.
981  //
982  // fmin(undef, x) -> x
983  if (isa<UndefValue>(Arg0))
984    return Arg1;
985
986  // fmin(x, undef) -> x
987  if (isa<UndefValue>(Arg1))
988    return Arg0;
989
990  Value *X = nullptr;
991  Value *Y = nullptr;
992  if (II.getIntrinsicID() == Intrinsic::minnum) {
993    // fmin(x, fmin(x, y)) -> fmin(x, y)
994    // fmin(y, fmin(x, y)) -> fmin(x, y)
995    if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
996      if (Arg0 == X || Arg0 == Y)
997        return Arg1;
998    }
999
1000    // fmin(fmin(x, y), x) -> fmin(x, y)
1001    // fmin(fmin(x, y), y) -> fmin(x, y)
1002    if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
1003      if (Arg1 == X || Arg1 == Y)
1004        return Arg0;
1005    }
1006
1007    // TODO: fmin(nnan x, inf) -> x
1008    // TODO: fmin(nnan ninf x, flt_max) -> x
1009    if (C1 && C1->isInfinity()) {
1010      // fmin(x, -inf) -> -inf
1011      if (C1->isNegative())
1012        return Arg1;
1013    }
1014  } else {
1015    assert(II.getIntrinsicID() == Intrinsic::maxnum);
1016    // fmax(x, fmax(x, y)) -> fmax(x, y)
1017    // fmax(y, fmax(x, y)) -> fmax(x, y)
1018    if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
1019      if (Arg0 == X || Arg0 == Y)
1020        return Arg1;
1021    }
1022
1023    // fmax(fmax(x, y), x) -> fmax(x, y)
1024    // fmax(fmax(x, y), y) -> fmax(x, y)
1025    if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
1026      if (Arg1 == X || Arg1 == Y)
1027        return Arg0;
1028    }
1029
1030    // TODO: fmax(nnan x, -inf) -> x
1031    // TODO: fmax(nnan ninf x, -flt_max) -> x
1032    if (C1 && C1->isInfinity()) {
1033      // fmax(x, inf) -> inf
1034      if (!C1->isNegative())
1035        return Arg1;
1036    }
1037  }
1038  return nullptr;
1039}
1040
1041static bool maskIsAllOneOrUndef(Value *Mask) {
1042  auto *ConstMask = dyn_cast<Constant>(Mask);
1043  if (!ConstMask)
1044    return false;
1045  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1046    return true;
1047  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
1048       ++I) {
1049    if (auto *MaskElt = ConstMask->getAggregateElement(I))
1050      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1051        continue;
1052    return false;
1053  }
1054  return true;
1055}
1056
1057static Value *simplifyMaskedLoad(const IntrinsicInst &II,
1058                                 InstCombiner::BuilderTy &Builder) {
1059  // If the mask is all ones or undefs, this is a plain vector load of the 1st
1060  // argument.
1061  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
1062    Value *LoadPtr = II.getArgOperand(0);
1063    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1064    return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
1065  }
1066
1067  return nullptr;
1068}
1069
1070static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1071  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1072  if (!ConstMask)
1073    return nullptr;
1074
1075  // If the mask is all zeros, this instruction does nothing.
1076  if (ConstMask->isNullValue())
1077    return IC.eraseInstFromFunction(II);
1078
1079  // If the mask is all ones, this is a plain vector store of the 1st argument.
1080  if (ConstMask->isAllOnesValue()) {
1081    Value *StorePtr = II.getArgOperand(1);
1082    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
1083    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1084  }
1085
1086  return nullptr;
1087}
1088
1089static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
1090  // If the mask is all zeros, return the "passthru" argument of the gather.
1091  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
1092  if (ConstMask && ConstMask->isNullValue())
1093    return IC.replaceInstUsesWith(II, II.getArgOperand(3));
1094
1095  return nullptr;
1096}
1097
1098static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
1099  // If the mask is all zeros, a scatter does nothing.
1100  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1101  if (ConstMask && ConstMask->isNullValue())
1102    return IC.eraseInstFromFunction(II);
1103
1104  return nullptr;
1105}
1106
1107// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1108// XMM register mask efficiently, we could transform all x86 masked intrinsics
1109// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1110static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1111  Value *Ptr = II.getOperand(0);
1112  Value *Mask = II.getOperand(1);
1113  Constant *ZeroVec = Constant::getNullValue(II.getType());
1114
1115  // Special case a zero mask since that's not a ConstantDataVector.
1116  // This masked load instruction creates a zero vector.
1117  if (isa<ConstantAggregateZero>(Mask))
1118    return IC.replaceInstUsesWith(II, ZeroVec);
1119
1120  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1121  if (!ConstMask)
1122    return nullptr;
1123
1124  // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1125  // to allow target-independent optimizations.
1126
1127  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1128  // the LLVM intrinsic definition for the pointer argument.
1129  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1130  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
1131  Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");
1132
1133  // Second, convert the x86 XMM integer vector mask to a vector of bools based
1134  // on each element's most significant bit (the sign bit).
1135  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1136
1137  // The pass-through vector for an x86 masked load is a zero vector.
1138  CallInst *NewMaskedLoad =
1139      IC.Builder->CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
1140  return IC.replaceInstUsesWith(II, NewMaskedLoad);
1141}
1142
1143// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1144// XMM register mask efficiently, we could transform all x86 masked intrinsics
1145// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1146static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1147  Value *Ptr = II.getOperand(0);
1148  Value *Mask = II.getOperand(1);
1149  Value *Vec = II.getOperand(2);
1150
1151  // Special case a zero mask since that's not a ConstantDataVector:
1152  // this masked store instruction does nothing.
1153  if (isa<ConstantAggregateZero>(Mask)) {
1154    IC.eraseInstFromFunction(II);
1155    return true;
1156  }
1157
1158  // The SSE2 version is too weird (eg, unaligned but non-temporal) to do
1159  // anything else at this level.
1160  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1161    return false;
1162
1163  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1164  if (!ConstMask)
1165    return false;
1166
1167  // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1168  // to allow target-independent optimizations.
1169
1170  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1171  // the LLVM intrinsic definition for the pointer argument.
1172  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1173  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
1174  Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec");
1175
1176  // Second, convert the x86 XMM integer vector mask to a vector of bools based
1177  // on each element's most significant bit (the sign bit).
1178  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1179
1180  IC.Builder->CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
1181
1182  // 'Replace uses' doesn't work for stores. Erase the original masked store.
1183  IC.eraseInstFromFunction(II);
1184  return true;
1185}
1186
1187// Returns true iff the 2 intrinsics have the same operands, limiting the
1188// comparison to the first NumOperands.
1189static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1190                             unsigned NumOperands) {
1191  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1192  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1193  for (unsigned i = 0; i < NumOperands; i++)
1194    if (I.getArgOperand(i) != E.getArgOperand(i))
1195      return false;
1196  return true;
1197}
1198
1199// Remove trivially empty start/end intrinsic ranges, i.e. a start
1200// immediately followed by an end (ignoring debuginfo or other
1201// start/end intrinsics in between). As this handles only the most trivial
1202// cases, tracking the nesting level is not needed:
1203//
1204//   call @llvm.foo.start(i1 0) ; &I
1205//   call @llvm.foo.start(i1 0)
1206//   call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1207//   call @llvm.foo.end(i1 0)
1208static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1209                                      unsigned EndID, InstCombiner &IC) {
1210  assert(I.getIntrinsicID() == StartID &&
1211         "Start intrinsic does not have expected ID");
1212  BasicBlock::iterator BI(I), BE(I.getParent()->end());
1213  for (++BI; BI != BE; ++BI) {
1214    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1215      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1216        continue;
1217      if (E->getIntrinsicID() == EndID &&
1218          haveSameOperands(I, *E, E->getNumArgOperands())) {
1219        IC.eraseInstFromFunction(*E);
1220        IC.eraseInstFromFunction(I);
1221        return true;
1222      }
1223    }
1224    break;
1225  }
1226
1227  return false;
1228}
1229
1230Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1231  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1232  return nullptr;
1233}
1234
1235Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1236  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1237  return nullptr;
1238}
1239
1240/// CallInst simplification. This mostly only handles folding of intrinsic
1241/// instructions. For normal calls, it allows visitCallSite to do the heavy
1242/// lifting.
1243Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1244  auto Args = CI.arg_operands();
1245  if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
1246                              TLI, DT, AC))
1247    return replaceInstUsesWith(CI, V);
1248
1249  if (isFreeCall(&CI, TLI))
1250    return visitFree(CI);
1251
1252  // If the caller function is nounwind, mark the call as nounwind, even if the
1253  // callee isn't.
1254  if (CI.getParent()->getParent()->doesNotThrow() &&
1255      !CI.doesNotThrow()) {
1256    CI.setDoesNotThrow();
1257    return &CI;
1258  }
1259
1260  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1261  if (!II) return visitCallSite(&CI);
1262
1263  // Intrinsics cannot occur in an invoke, so handle them here instead of in
1264  // visitCallSite.
1265  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
1266    bool Changed = false;
1267
1268    // memmove/cpy/set of zero bytes is a noop.
1269    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1270      if (NumBytes->isNullValue())
1271        return eraseInstFromFunction(CI);
1272
1273      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1274        if (CI->getZExtValue() == 1) {
1275          // Replace the instruction with just byte operations.  We would
1276          // transform other cases to loads/stores, but we don't know if
1277          // alignment is sufficient.
1278        }
1279    }
1280
1281    // No other transformations apply to volatile transfers.
1282    if (MI->isVolatile())
1283      return nullptr;
1284
1285    // If we have a memmove and the source operation is a constant global,
1286    // then the source and dest pointers can't alias, so we can change this
1287    // into a call to memcpy.
1288    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
1289      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1290        if (GVSrc->isConstant()) {
1291          Module *M = CI.getModule();
1292          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
1293          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1294                           CI.getArgOperand(1)->getType(),
1295                           CI.getArgOperand(2)->getType() };
1296          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1297          Changed = true;
1298        }
1299    }
1300
1301    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1302      // memmove(x,x,size) -> noop.
1303      if (MTI->getSource() == MTI->getDest())
1304        return eraseInstFromFunction(CI);
1305    }
1306
1307    // If we can determine a pointer alignment that is bigger than currently
1308    // set, update the alignment.
1309    if (isa<MemTransferInst>(MI)) {
1310      if (Instruction *I = SimplifyMemTransfer(MI))
1311        return I;
1312    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
1313      if (Instruction *I = SimplifyMemSet(MSI))
1314        return I;
1315    }
1316
1317    if (Changed) return II;
1318  }
1319
1320  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1321                                              unsigned DemandedWidth) {
1322    APInt UndefElts(Width, 0);
1323    APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1324    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1325  };
1326  auto SimplifyDemandedVectorEltsHigh = [this](Value *Op, unsigned Width,
1327                                              unsigned DemandedWidth) {
1328    APInt UndefElts(Width, 0);
1329    APInt DemandedElts = APInt::getHighBitsSet(Width, DemandedWidth);
1330    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1331  };
1332
1333  switch (II->getIntrinsicID()) {
1334  default: break;
1335  case Intrinsic::objectsize: {
1336    uint64_t Size;
1337    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
1338      APInt APSize(II->getType()->getIntegerBitWidth(), Size);
1339      // Equality check to be sure that `Size` can fit in a value of type
1340      // `II->getType()`
1341      if (APSize == Size)
1342        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), APSize));
1343    }
1344    return nullptr;
1345  }
1346  case Intrinsic::bswap: {
1347    Value *IIOperand = II->getArgOperand(0);
1348    Value *X = nullptr;
1349
1350    // bswap(bswap(x)) -> x
1351    if (match(IIOperand, m_BSwap(m_Value(X))))
1352        return replaceInstUsesWith(CI, X);
1353
1354    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1355    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1356      unsigned C = X->getType()->getPrimitiveSizeInBits() -
1357        IIOperand->getType()->getPrimitiveSizeInBits();
1358      Value *CV = ConstantInt::get(X->getType(), C);
1359      Value *V = Builder->CreateLShr(X, CV);
1360      return new TruncInst(V, IIOperand->getType());
1361    }
1362    break;
1363  }
1364
1365  case Intrinsic::bitreverse: {
1366    Value *IIOperand = II->getArgOperand(0);
1367    Value *X = nullptr;
1368
1369    // bitreverse(bitreverse(x)) -> x
1370    if (match(IIOperand, m_Intrinsic<Intrinsic::bitreverse>(m_Value(X))))
1371      return replaceInstUsesWith(CI, X);
1372    break;
1373  }
1374
1375  case Intrinsic::masked_load:
1376    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, *Builder))
1377      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1378    break;
1379  case Intrinsic::masked_store:
1380    return simplifyMaskedStore(*II, *this);
1381  case Intrinsic::masked_gather:
1382    return simplifyMaskedGather(*II, *this);
1383  case Intrinsic::masked_scatter:
1384    return simplifyMaskedScatter(*II, *this);
1385
1386  case Intrinsic::powi:
1387    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1388      // powi(x, 0) -> 1.0
1389      if (Power->isZero())
1390        return replaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
1391      // powi(x, 1) -> x
1392      if (Power->isOne())
1393        return replaceInstUsesWith(CI, II->getArgOperand(0));
1394      // powi(x, -1) -> 1/x
1395      if (Power->isAllOnesValue())
1396        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1397                                          II->getArgOperand(0));
1398    }
1399    break;
1400  case Intrinsic::cttz: {
1401    // If all bits below the first known one are known zero,
1402    // this value is constant.
1403    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
1404    // FIXME: Try to simplify vectors of integers.
1405    if (!IT) break;
1406    uint32_t BitWidth = IT->getBitWidth();
1407    APInt KnownZero(BitWidth, 0);
1408    APInt KnownOne(BitWidth, 0);
1409    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
1410    unsigned TrailingZeros = KnownOne.countTrailingZeros();
1411    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
1412    if ((Mask & KnownZero) == Mask)
1413      return replaceInstUsesWith(CI, ConstantInt::get(IT,
1414                                 APInt(BitWidth, TrailingZeros)));
1415
1416    }
1417    break;
1418  case Intrinsic::ctlz: {
1419    // If all bits above the first known one are known zero,
1420    // this value is constant.
1421    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
1422    // FIXME: Try to simplify vectors of integers.
1423    if (!IT) break;
1424    uint32_t BitWidth = IT->getBitWidth();
1425    APInt KnownZero(BitWidth, 0);
1426    APInt KnownOne(BitWidth, 0);
1427    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
1428    unsigned LeadingZeros = KnownOne.countLeadingZeros();
1429    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
1430    if ((Mask & KnownZero) == Mask)
1431      return replaceInstUsesWith(CI, ConstantInt::get(IT,
1432                                 APInt(BitWidth, LeadingZeros)));
1433
1434    }
1435    break;
1436
1437  case Intrinsic::uadd_with_overflow:
1438  case Intrinsic::sadd_with_overflow:
1439  case Intrinsic::umul_with_overflow:
1440  case Intrinsic::smul_with_overflow:
1441    if (isa<Constant>(II->getArgOperand(0)) &&
1442        !isa<Constant>(II->getArgOperand(1))) {
1443      // Canonicalize constants into the RHS.
1444      Value *LHS = II->getArgOperand(0);
1445      II->setArgOperand(0, II->getArgOperand(1));
1446      II->setArgOperand(1, LHS);
1447      return II;
1448    }
1449    // fall through
1450
1451  case Intrinsic::usub_with_overflow:
1452  case Intrinsic::ssub_with_overflow: {
1453    OverflowCheckFlavor OCF =
1454        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
1455    assert(OCF != OCF_INVALID && "unexpected!");
1456
1457    Value *OperationResult = nullptr;
1458    Constant *OverflowResult = nullptr;
1459    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
1460                              *II, OperationResult, OverflowResult))
1461      return CreateOverflowTuple(II, OperationResult, OverflowResult);
1462
1463    break;
1464  }
1465
1466  case Intrinsic::minnum:
1467  case Intrinsic::maxnum: {
1468    Value *Arg0 = II->getArgOperand(0);
1469    Value *Arg1 = II->getArgOperand(1);
1470    // Canonicalize constants to the RHS.
1471    if (isa<ConstantFP>(Arg0) && !isa<ConstantFP>(Arg1)) {
1472      II->setArgOperand(0, Arg1);
1473      II->setArgOperand(1, Arg0);
1474      return II;
1475    }
1476    if (Value *V = simplifyMinnumMaxnum(*II))
1477      return replaceInstUsesWith(*II, V);
1478    break;
1479  }
1480  case Intrinsic::ppc_altivec_lvx:
1481  case Intrinsic::ppc_altivec_lvxl:
1482    // Turn PPC lvx -> load if the pointer is known aligned.
1483    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
1484        16) {
1485      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1486                                         PointerType::getUnqual(II->getType()));
1487      return new LoadInst(Ptr);
1488    }
1489    break;
1490  case Intrinsic::ppc_vsx_lxvw4x:
1491  case Intrinsic::ppc_vsx_lxvd2x: {
1492    // Turn PPC VSX loads into normal loads.
1493    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1494                                        PointerType::getUnqual(II->getType()));
1495    return new LoadInst(Ptr, Twine(""), false, 1);
1496  }
1497  case Intrinsic::ppc_altivec_stvx:
1498  case Intrinsic::ppc_altivec_stvxl:
1499    // Turn stvx -> store if the pointer is known aligned.
1500    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
1501        16) {
1502      Type *OpPtrTy =
1503        PointerType::getUnqual(II->getArgOperand(0)->getType());
1504      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1505      return new StoreInst(II->getArgOperand(0), Ptr);
1506    }
1507    break;
1508  case Intrinsic::ppc_vsx_stxvw4x:
1509  case Intrinsic::ppc_vsx_stxvd2x: {
1510    // Turn PPC VSX stores into normal stores.
1511    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
1512    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1513    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
1514  }
1515  case Intrinsic::ppc_qpx_qvlfs:
1516    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
1517    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
1518        16) {
1519      Type *VTy = VectorType::get(Builder->getFloatTy(),
1520                                  II->getType()->getVectorNumElements());
1521      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1522                                         PointerType::getUnqual(VTy));
1523      Value *Load = Builder->CreateLoad(Ptr);
1524      return new FPExtInst(Load, II->getType());
1525    }
1526    break;
1527  case Intrinsic::ppc_qpx_qvlfd:
1528    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
1529    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
1530        32) {
1531      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1532                                         PointerType::getUnqual(II->getType()));
1533      return new LoadInst(Ptr);
1534    }
1535    break;
1536  case Intrinsic::ppc_qpx_qvstfs:
1537    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
1538    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
1539        16) {
1540      Type *VTy = VectorType::get(Builder->getFloatTy(),
1541          II->getArgOperand(0)->getType()->getVectorNumElements());
1542      Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
1543      Type *OpPtrTy = PointerType::getUnqual(VTy);
1544      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1545      return new StoreInst(TOp, Ptr);
1546    }
1547    break;
1548  case Intrinsic::ppc_qpx_qvstfd:
1549    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
1550    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
1551        32) {
1552      Type *OpPtrTy =
1553        PointerType::getUnqual(II->getArgOperand(0)->getType());
1554      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1555      return new StoreInst(II->getArgOperand(0), Ptr);
1556    }
1557    break;
1558
1559  case Intrinsic::x86_vcvtph2ps_128:
1560  case Intrinsic::x86_vcvtph2ps_256: {
1561    auto Arg = II->getArgOperand(0);
1562    auto ArgType = cast<VectorType>(Arg->getType());
1563    auto RetType = cast<VectorType>(II->getType());
1564    unsigned ArgWidth = ArgType->getNumElements();
1565    unsigned RetWidth = RetType->getNumElements();
1566    assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
1567    assert(ArgType->isIntOrIntVectorTy() &&
1568           ArgType->getScalarSizeInBits() == 16 &&
1569           "CVTPH2PS input type should be 16-bit integer vector");
1570    assert(RetType->getScalarType()->isFloatTy() &&
1571           "CVTPH2PS output type should be 32-bit float vector");
1572
1573    // Constant folding: Convert to generic half to single conversion.
1574    if (isa<ConstantAggregateZero>(Arg))
1575      return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
1576
1577    if (isa<ConstantDataVector>(Arg)) {
1578      auto VectorHalfAsShorts = Arg;
1579      if (RetWidth < ArgWidth) {
1580        SmallVector<uint32_t, 8> SubVecMask;
1581        for (unsigned i = 0; i != RetWidth; ++i)
1582          SubVecMask.push_back((int)i);
1583        VectorHalfAsShorts = Builder->CreateShuffleVector(
1584            Arg, UndefValue::get(ArgType), SubVecMask);
1585      }
1586
1587      auto VectorHalfType =
1588          VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
1589      auto VectorHalfs =
1590          Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
1591      auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
1592      return replaceInstUsesWith(*II, VectorFloats);
1593    }
1594
1595    // We only use the lowest lanes of the argument.
1596    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
1597      II->setArgOperand(0, V);
1598      return II;
1599    }
1600    break;
1601  }
1602
1603  case Intrinsic::x86_sse_cvtss2si:
1604  case Intrinsic::x86_sse_cvtss2si64:
1605  case Intrinsic::x86_sse_cvttss2si:
1606  case Intrinsic::x86_sse_cvttss2si64:
1607  case Intrinsic::x86_sse2_cvtsd2si:
1608  case Intrinsic::x86_sse2_cvtsd2si64:
1609  case Intrinsic::x86_sse2_cvttsd2si:
1610  case Intrinsic::x86_sse2_cvttsd2si64: {
1611    // These intrinsics only demand the 0th element of their input vectors. If
1612    // we can simplify the input based on that, do so now.
1613    Value *Arg = II->getArgOperand(0);
1614    unsigned VWidth = Arg->getType()->getVectorNumElements();
1615    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
1616      II->setArgOperand(0, V);
1617      return II;
1618    }
1619    break;
1620  }
1621
1622  case Intrinsic::x86_mmx_pmovmskb:
1623  case Intrinsic::x86_sse_movmsk_ps:
1624  case Intrinsic::x86_sse2_movmsk_pd:
1625  case Intrinsic::x86_sse2_pmovmskb_128:
1626  case Intrinsic::x86_avx_movmsk_pd_256:
1627  case Intrinsic::x86_avx_movmsk_ps_256:
1628  case Intrinsic::x86_avx2_pmovmskb: {
1629    if (Value *V = simplifyX86movmsk(*II, *Builder))
1630      return replaceInstUsesWith(*II, V);
1631    break;
1632  }
1633
1634  case Intrinsic::x86_sse_comieq_ss:
1635  case Intrinsic::x86_sse_comige_ss:
1636  case Intrinsic::x86_sse_comigt_ss:
1637  case Intrinsic::x86_sse_comile_ss:
1638  case Intrinsic::x86_sse_comilt_ss:
1639  case Intrinsic::x86_sse_comineq_ss:
1640  case Intrinsic::x86_sse_ucomieq_ss:
1641  case Intrinsic::x86_sse_ucomige_ss:
1642  case Intrinsic::x86_sse_ucomigt_ss:
1643  case Intrinsic::x86_sse_ucomile_ss:
1644  case Intrinsic::x86_sse_ucomilt_ss:
1645  case Intrinsic::x86_sse_ucomineq_ss:
1646  case Intrinsic::x86_sse2_comieq_sd:
1647  case Intrinsic::x86_sse2_comige_sd:
1648  case Intrinsic::x86_sse2_comigt_sd:
1649  case Intrinsic::x86_sse2_comile_sd:
1650  case Intrinsic::x86_sse2_comilt_sd:
1651  case Intrinsic::x86_sse2_comineq_sd:
1652  case Intrinsic::x86_sse2_ucomieq_sd:
1653  case Intrinsic::x86_sse2_ucomige_sd:
1654  case Intrinsic::x86_sse2_ucomigt_sd:
1655  case Intrinsic::x86_sse2_ucomile_sd:
1656  case Intrinsic::x86_sse2_ucomilt_sd:
1657  case Intrinsic::x86_sse2_ucomineq_sd: {
1658    // These intrinsics only demand the 0th element of their input vectors. If
1659    // we can simplify the input based on that, do so now.
1660    bool MadeChange = false;
1661    Value *Arg0 = II->getArgOperand(0);
1662    Value *Arg1 = II->getArgOperand(1);
1663    unsigned VWidth = Arg0->getType()->getVectorNumElements();
1664    if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
1665      II->setArgOperand(0, V);
1666      MadeChange = true;
1667    }
1668    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
1669      II->setArgOperand(1, V);
1670      MadeChange = true;
1671    }
1672    if (MadeChange)
1673      return II;
1674    break;
1675  }
1676
1677  case Intrinsic::x86_sse_add_ss:
1678  case Intrinsic::x86_sse_sub_ss:
1679  case Intrinsic::x86_sse_mul_ss:
1680  case Intrinsic::x86_sse_div_ss:
1681  case Intrinsic::x86_sse_min_ss:
1682  case Intrinsic::x86_sse_max_ss:
1683  case Intrinsic::x86_sse_cmp_ss:
1684  case Intrinsic::x86_sse2_add_sd:
1685  case Intrinsic::x86_sse2_sub_sd:
1686  case Intrinsic::x86_sse2_mul_sd:
1687  case Intrinsic::x86_sse2_div_sd:
1688  case Intrinsic::x86_sse2_min_sd:
1689  case Intrinsic::x86_sse2_max_sd:
1690  case Intrinsic::x86_sse2_cmp_sd: {
1691    // These intrinsics only demand the lowest element of the second input
1692    // vector.
1693    Value *Arg1 = II->getArgOperand(1);
1694    unsigned VWidth = Arg1->getType()->getVectorNumElements();
1695    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
1696      II->setArgOperand(1, V);
1697      return II;
1698    }
1699    break;
1700  }
1701
1702  case Intrinsic::x86_sse41_round_ss:
1703  case Intrinsic::x86_sse41_round_sd: {
1704    // These intrinsics demand the upper elements of the first input vector and
1705    // the lowest element of the second input vector.
1706    bool MadeChange = false;
1707    Value *Arg0 = II->getArgOperand(0);
1708    Value *Arg1 = II->getArgOperand(1);
1709    unsigned VWidth = Arg0->getType()->getVectorNumElements();
1710    if (Value *V = SimplifyDemandedVectorEltsHigh(Arg0, VWidth, VWidth - 1)) {
1711      II->setArgOperand(0, V);
1712      MadeChange = true;
1713    }
1714    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
1715      II->setArgOperand(1, V);
1716      MadeChange = true;
1717    }
1718    if (MadeChange)
1719      return II;
1720    break;
1721  }
1722
1723  // Constant fold ashr( <A x Bi>, Ci ).
1724  // Constant fold lshr( <A x Bi>, Ci ).
1725  // Constant fold shl( <A x Bi>, Ci ).
1726  case Intrinsic::x86_sse2_psrai_d:
1727  case Intrinsic::x86_sse2_psrai_w:
1728  case Intrinsic::x86_avx2_psrai_d:
1729  case Intrinsic::x86_avx2_psrai_w:
1730  case Intrinsic::x86_sse2_psrli_d:
1731  case Intrinsic::x86_sse2_psrli_q:
1732  case Intrinsic::x86_sse2_psrli_w:
1733  case Intrinsic::x86_avx2_psrli_d:
1734  case Intrinsic::x86_avx2_psrli_q:
1735  case Intrinsic::x86_avx2_psrli_w:
1736  case Intrinsic::x86_sse2_pslli_d:
1737  case Intrinsic::x86_sse2_pslli_q:
1738  case Intrinsic::x86_sse2_pslli_w:
1739  case Intrinsic::x86_avx2_pslli_d:
1740  case Intrinsic::x86_avx2_pslli_q:
1741  case Intrinsic::x86_avx2_pslli_w:
1742    if (Value *V = simplifyX86immShift(*II, *Builder))
1743      return replaceInstUsesWith(*II, V);
1744    break;
1745
1746  case Intrinsic::x86_sse2_psra_d:
1747  case Intrinsic::x86_sse2_psra_w:
1748  case Intrinsic::x86_avx2_psra_d:
1749  case Intrinsic::x86_avx2_psra_w:
1750  case Intrinsic::x86_sse2_psrl_d:
1751  case Intrinsic::x86_sse2_psrl_q:
1752  case Intrinsic::x86_sse2_psrl_w:
1753  case Intrinsic::x86_avx2_psrl_d:
1754  case Intrinsic::x86_avx2_psrl_q:
1755  case Intrinsic::x86_avx2_psrl_w:
1756  case Intrinsic::x86_sse2_psll_d:
1757  case Intrinsic::x86_sse2_psll_q:
1758  case Intrinsic::x86_sse2_psll_w:
1759  case Intrinsic::x86_avx2_psll_d:
1760  case Intrinsic::x86_avx2_psll_q:
1761  case Intrinsic::x86_avx2_psll_w: {
1762    if (Value *V = simplifyX86immShift(*II, *Builder))
1763      return replaceInstUsesWith(*II, V);
1764
1765    // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
1766    // operand to compute the shift amount.
1767    Value *Arg1 = II->getArgOperand(1);
1768    assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
1769           "Unexpected packed shift size");
1770    unsigned VWidth = Arg1->getType()->getVectorNumElements();
1771
1772    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
1773      II->setArgOperand(1, V);
1774      return II;
1775    }
1776    break;
1777  }
1778
1779  case Intrinsic::x86_avx2_psllv_d:
1780  case Intrinsic::x86_avx2_psllv_d_256:
1781  case Intrinsic::x86_avx2_psllv_q:
1782  case Intrinsic::x86_avx2_psllv_q_256:
1783  case Intrinsic::x86_avx2_psrav_d:
1784  case Intrinsic::x86_avx2_psrav_d_256:
1785  case Intrinsic::x86_avx2_psrlv_d:
1786  case Intrinsic::x86_avx2_psrlv_d_256:
1787  case Intrinsic::x86_avx2_psrlv_q:
1788  case Intrinsic::x86_avx2_psrlv_q_256:
1789    if (Value *V = simplifyX86varShift(*II, *Builder))
1790      return replaceInstUsesWith(*II, V);
1791    break;
1792
1793  case Intrinsic::x86_sse41_insertps:
1794    if (Value *V = simplifyX86insertps(*II, *Builder))
1795      return replaceInstUsesWith(*II, V);
1796    break;
1797
1798  case Intrinsic::x86_sse4a_extrq: {
1799    Value *Op0 = II->getArgOperand(0);
1800    Value *Op1 = II->getArgOperand(1);
1801    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1802    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1803    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1804           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
1805           VWidth1 == 16 && "Unexpected operand sizes");
1806
1807    // See if we're dealing with constant values.
1808    Constant *C1 = dyn_cast<Constant>(Op1);
1809    ConstantInt *CILength =
1810        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
1811           : nullptr;
1812    ConstantInt *CIIndex =
1813        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
1814           : nullptr;
1815
1816    // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
1817    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
1818      return replaceInstUsesWith(*II, V);
1819
1820    // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
1821    // operands and the lowest 16-bits of the second.
1822    bool MadeChange = false;
1823    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1824      II->setArgOperand(0, V);
1825      MadeChange = true;
1826    }
1827    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
1828      II->setArgOperand(1, V);
1829      MadeChange = true;
1830    }
1831    if (MadeChange)
1832      return II;
1833    break;
1834  }
1835
1836  case Intrinsic::x86_sse4a_extrqi: {
1837    // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
1838    // bits of the lower 64-bits. The upper 64-bits are undefined.
1839    Value *Op0 = II->getArgOperand(0);
1840    unsigned VWidth = Op0->getType()->getVectorNumElements();
1841    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
1842           "Unexpected operand size");
1843
1844    // See if we're dealing with constant values.
1845    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
1846    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
1847
1848    // Attempt to simplify to a constant or shuffle vector.
1849    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
1850      return replaceInstUsesWith(*II, V);
1851
1852    // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
1853    // operand.
1854    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
1855      II->setArgOperand(0, V);
1856      return II;
1857    }
1858    break;
1859  }
1860
1861  case Intrinsic::x86_sse4a_insertq: {
1862    Value *Op0 = II->getArgOperand(0);
1863    Value *Op1 = II->getArgOperand(1);
1864    unsigned VWidth = Op0->getType()->getVectorNumElements();
1865    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1866           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
1867           Op1->getType()->getVectorNumElements() == 2 &&
1868           "Unexpected operand size");
1869
1870    // See if we're dealing with constant values.
1871    Constant *C1 = dyn_cast<Constant>(Op1);
1872    ConstantInt *CI11 =
1873        C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
1874           : nullptr;
1875
1876    // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
1877    if (CI11) {
1878      const APInt &V11 = CI11->getValue();
1879      APInt Len = V11.zextOrTrunc(6);
1880      APInt Idx = V11.lshr(8).zextOrTrunc(6);
1881      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
1882        return replaceInstUsesWith(*II, V);
1883    }
1884
1885    // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
1886    // operand.
1887    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
1888      II->setArgOperand(0, V);
1889      return II;
1890    }
1891    break;
1892  }
1893
1894  case Intrinsic::x86_sse4a_insertqi: {
1895    // INSERTQI: Extract lowest Length bits from lower half of second source and
1896    // insert over first source starting at Index bit. The upper 64-bits are
1897    // undefined.
1898    Value *Op0 = II->getArgOperand(0);
1899    Value *Op1 = II->getArgOperand(1);
1900    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1901    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1902    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1903           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
1904           VWidth1 == 2 && "Unexpected operand sizes");
1905
1906    // See if we're dealing with constant values.
1907    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
1908    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
1909
1910    // Attempt to simplify to a constant or shuffle vector.
1911    if (CILength && CIIndex) {
1912      APInt Len = CILength->getValue().zextOrTrunc(6);
1913      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
1914      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
1915        return replaceInstUsesWith(*II, V);
1916    }
1917
1918    // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
1919    // operands.
1920    bool MadeChange = false;
1921    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1922      II->setArgOperand(0, V);
1923      MadeChange = true;
1924    }
1925    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
1926      II->setArgOperand(1, V);
1927      MadeChange = true;
1928    }
1929    if (MadeChange)
1930      return II;
1931    break;
1932  }
1933
1934  case Intrinsic::x86_sse41_pblendvb:
1935  case Intrinsic::x86_sse41_blendvps:
1936  case Intrinsic::x86_sse41_blendvpd:
1937  case Intrinsic::x86_avx_blendv_ps_256:
1938  case Intrinsic::x86_avx_blendv_pd_256:
1939  case Intrinsic::x86_avx2_pblendvb: {
1940    // Convert blendv* to vector selects if the mask is constant.
1941    // This optimization is convoluted because the intrinsic is defined as
1942    // getting a vector of floats or doubles for the ps and pd versions.
1943    // FIXME: That should be changed.
1944
1945    Value *Op0 = II->getArgOperand(0);
1946    Value *Op1 = II->getArgOperand(1);
1947    Value *Mask = II->getArgOperand(2);
1948
1949    // fold (blend A, A, Mask) -> A
1950    if (Op0 == Op1)
1951      return replaceInstUsesWith(CI, Op0);
1952
1953    // Zero Mask - select 1st argument.
1954    if (isa<ConstantAggregateZero>(Mask))
1955      return replaceInstUsesWith(CI, Op0);
1956
1957    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
1958    if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
1959      Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
1960      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
1961    }
1962    break;
1963  }
1964
1965  case Intrinsic::x86_ssse3_pshuf_b_128:
1966  case Intrinsic::x86_avx2_pshuf_b:
1967    if (Value *V = simplifyX86pshufb(*II, *Builder))
1968      return replaceInstUsesWith(*II, V);
1969    break;
1970
1971  case Intrinsic::x86_avx_vpermilvar_ps:
1972  case Intrinsic::x86_avx_vpermilvar_ps_256:
1973  case Intrinsic::x86_avx_vpermilvar_pd:
1974  case Intrinsic::x86_avx_vpermilvar_pd_256:
1975    if (Value *V = simplifyX86vpermilvar(*II, *Builder))
1976      return replaceInstUsesWith(*II, V);
1977    break;
1978
1979  case Intrinsic::x86_avx2_permd:
1980  case Intrinsic::x86_avx2_permps:
1981    if (Value *V = simplifyX86vpermv(*II, *Builder))
1982      return replaceInstUsesWith(*II, V);
1983    break;
1984
1985  case Intrinsic::x86_avx_vperm2f128_pd_256:
1986  case Intrinsic::x86_avx_vperm2f128_ps_256:
1987  case Intrinsic::x86_avx_vperm2f128_si_256:
1988  case Intrinsic::x86_avx2_vperm2i128:
1989    if (Value *V = simplifyX86vperm2(*II, *Builder))
1990      return replaceInstUsesWith(*II, V);
1991    break;
1992
1993  case Intrinsic::x86_avx_maskload_ps:
1994  case Intrinsic::x86_avx_maskload_pd:
1995  case Intrinsic::x86_avx_maskload_ps_256:
1996  case Intrinsic::x86_avx_maskload_pd_256:
1997  case Intrinsic::x86_avx2_maskload_d:
1998  case Intrinsic::x86_avx2_maskload_q:
1999  case Intrinsic::x86_avx2_maskload_d_256:
2000  case Intrinsic::x86_avx2_maskload_q_256:
2001    if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
2002      return I;
2003    break;
2004
2005  case Intrinsic::x86_sse2_maskmov_dqu:
2006  case Intrinsic::x86_avx_maskstore_ps:
2007  case Intrinsic::x86_avx_maskstore_pd:
2008  case Intrinsic::x86_avx_maskstore_ps_256:
2009  case Intrinsic::x86_avx_maskstore_pd_256:
2010  case Intrinsic::x86_avx2_maskstore_d:
2011  case Intrinsic::x86_avx2_maskstore_q:
2012  case Intrinsic::x86_avx2_maskstore_d_256:
2013  case Intrinsic::x86_avx2_maskstore_q_256:
2014    if (simplifyX86MaskedStore(*II, *this))
2015      return nullptr;
2016    break;
2017
2018  case Intrinsic::x86_xop_vpcomb:
2019  case Intrinsic::x86_xop_vpcomd:
2020  case Intrinsic::x86_xop_vpcomq:
2021  case Intrinsic::x86_xop_vpcomw:
2022    if (Value *V = simplifyX86vpcom(*II, *Builder, true))
2023      return replaceInstUsesWith(*II, V);
2024    break;
2025
2026  case Intrinsic::x86_xop_vpcomub:
2027  case Intrinsic::x86_xop_vpcomud:
2028  case Intrinsic::x86_xop_vpcomuq:
2029  case Intrinsic::x86_xop_vpcomuw:
2030    if (Value *V = simplifyX86vpcom(*II, *Builder, false))
2031      return replaceInstUsesWith(*II, V);
2032    break;
2033
2034  case Intrinsic::ppc_altivec_vperm:
2035    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
2036    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
2037    // a vectorshuffle for little endian, we must undo the transformation
2038    // performed on vec_perm in altivec.h.  That is, we must complement
2039    // the permutation mask with respect to 31 and reverse the order of
2040    // V1 and V2.
2041    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
2042      assert(Mask->getType()->getVectorNumElements() == 16 &&
2043             "Bad type for intrinsic!");
2044
2045      // Check that all of the elements are integer constants or undefs.
2046      bool AllEltsOk = true;
2047      for (unsigned i = 0; i != 16; ++i) {
2048        Constant *Elt = Mask->getAggregateElement(i);
2049        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
2050          AllEltsOk = false;
2051          break;
2052        }
2053      }
2054
2055      if (AllEltsOk) {
2056        // Cast the input vectors to byte vectors.
2057        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
2058                                            Mask->getType());
2059        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
2060                                            Mask->getType());
2061        Value *Result = UndefValue::get(Op0->getType());
2062
2063        // Only extract each element once.
2064        Value *ExtractedElts[32];
2065        memset(ExtractedElts, 0, sizeof(ExtractedElts));
2066
2067        for (unsigned i = 0; i != 16; ++i) {
2068          if (isa<UndefValue>(Mask->getAggregateElement(i)))
2069            continue;
2070          unsigned Idx =
2071            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
2072          Idx &= 31;  // Match the hardware behavior.
2073          if (DL.isLittleEndian())
2074            Idx = 31 - Idx;
2075
2076          if (!ExtractedElts[Idx]) {
2077            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
2078            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
2079            ExtractedElts[Idx] =
2080              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
2081                                            Builder->getInt32(Idx&15));
2082          }
2083
2084          // Insert this value into the result vector.
2085          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
2086                                                Builder->getInt32(i));
2087        }
2088        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
2089      }
2090    }
2091    break;
2092
2093  case Intrinsic::arm_neon_vld1:
2094  case Intrinsic::arm_neon_vld2:
2095  case Intrinsic::arm_neon_vld3:
2096  case Intrinsic::arm_neon_vld4:
2097  case Intrinsic::arm_neon_vld2lane:
2098  case Intrinsic::arm_neon_vld3lane:
2099  case Intrinsic::arm_neon_vld4lane:
2100  case Intrinsic::arm_neon_vst1:
2101  case Intrinsic::arm_neon_vst2:
2102  case Intrinsic::arm_neon_vst3:
2103  case Intrinsic::arm_neon_vst4:
2104  case Intrinsic::arm_neon_vst2lane:
2105  case Intrinsic::arm_neon_vst3lane:
2106  case Intrinsic::arm_neon_vst4lane: {
2107    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
2108    unsigned AlignArg = II->getNumArgOperands() - 1;
2109    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
2110    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
2111      II->setArgOperand(AlignArg,
2112                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
2113                                         MemAlign, false));
2114      return II;
2115    }
2116    break;
2117  }
2118
2119  case Intrinsic::arm_neon_vmulls:
2120  case Intrinsic::arm_neon_vmullu:
2121  case Intrinsic::aarch64_neon_smull:
2122  case Intrinsic::aarch64_neon_umull: {
2123    Value *Arg0 = II->getArgOperand(0);
2124    Value *Arg1 = II->getArgOperand(1);
2125
2126    // Handle mul by zero first:
2127    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
2128      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
2129    }
2130
2131    // Check for constant LHS & RHS - in this case we just simplify.
2132    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
2133                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
2134    VectorType *NewVT = cast<VectorType>(II->getType());
2135    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2136      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2137        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
2138        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
2139
2140        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
2141      }
2142
2143      // Couldn't simplify - canonicalize constant to the RHS.
2144      std::swap(Arg0, Arg1);
2145    }
2146
2147    // Handle mul by one:
2148    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
2149      if (ConstantInt *Splat =
2150              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
2151        if (Splat->isOne())
2152          return CastInst::CreateIntegerCast(Arg0, II->getType(),
2153                                             /*isSigned=*/!Zext);
2154
2155    break;
2156  }
2157
2158  case Intrinsic::amdgcn_rcp: {
2159    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
2160      const APFloat &ArgVal = C->getValueAPF();
2161      APFloat Val(ArgVal.getSemantics(), 1.0);
2162      APFloat::opStatus Status = Val.divide(ArgVal,
2163                                            APFloat::rmNearestTiesToEven);
2164      // Only do this if it was exact and therefore not dependent on the
2165      // rounding mode.
2166      if (Status == APFloat::opOK)
2167        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
2168    }
2169
2170    break;
2171  }
2172  case Intrinsic::amdgcn_frexp_mant:
2173  case Intrinsic::amdgcn_frexp_exp: {
2174    Value *Src = II->getArgOperand(0);
2175    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
2176      int Exp;
2177      APFloat Significand = frexp(C->getValueAPF(), Exp,
2178                                  APFloat::rmNearestTiesToEven);
2179
2180      if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
2181        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
2182                                                       Significand));
2183      }
2184
2185      // Match instruction special case behavior.
2186      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
2187        Exp = 0;
2188
2189      return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
2190    }
2191
2192    if (isa<UndefValue>(Src))
2193      return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
2194
2195    break;
2196  }
2197  case Intrinsic::stackrestore: {
2198    // If the save is right next to the restore, remove the restore.  This can
2199    // happen when variable allocas are DCE'd.
2200    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2201      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
2202        if (&*++SS->getIterator() == II)
2203          return eraseInstFromFunction(CI);
2204      }
2205    }
2206
2207    // Scan down this block to see if there is another stack restore in the
2208    // same block without an intervening call/alloca.
2209    BasicBlock::iterator BI(II);
2210    TerminatorInst *TI = II->getParent()->getTerminator();
2211    bool CannotRemove = false;
2212    for (++BI; &*BI != TI; ++BI) {
2213      if (isa<AllocaInst>(BI)) {
2214        CannotRemove = true;
2215        break;
2216      }
2217      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
2218        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
2219          // If there is a stackrestore below this one, remove this one.
2220          if (II->getIntrinsicID() == Intrinsic::stackrestore)
2221            return eraseInstFromFunction(CI);
2222
2223          // Bail if we cross over an intrinsic with side effects, such as
2224          // llvm.stacksave, llvm.read_register, or llvm.setjmp.
2225          if (II->mayHaveSideEffects()) {
2226            CannotRemove = true;
2227            break;
2228          }
2229        } else {
2230          // If we found a non-intrinsic call, we can't remove the stack
2231          // restore.
2232          CannotRemove = true;
2233          break;
2234        }
2235      }
2236    }
2237
2238    // If the stack restore is in a return, resume, or unwind block and if there
2239    // are no allocas or calls between the restore and the return, nuke the
2240    // restore.
2241    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2242      return eraseInstFromFunction(CI);
2243    break;
2244  }
2245  case Intrinsic::lifetime_start:
2246    if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
2247                                  Intrinsic::lifetime_end, *this))
2248      return nullptr;
2249    break;
2250  case Intrinsic::assume: {
2251    Value *IIOperand = II->getArgOperand(0);
2252    // Remove an assume if it is immediately followed by an identical assume.
2253    if (match(II->getNextNode(),
2254              m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
2255      return eraseInstFromFunction(CI);
2256
2257    // Canonicalize assume(a && b) -> assume(a); assume(b);
2258    // Note: New assumption intrinsics created here are registered by
2259    // the InstCombineIRInserter object.
2260    Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
2261    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
2262      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
2263      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
2264      return eraseInstFromFunction(*II);
2265    }
2266    // assume(!(a || b)) -> assume(!a); assume(!b);
2267    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
2268      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
2269                          II->getName());
2270      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
2271                          II->getName());
2272      return eraseInstFromFunction(*II);
2273    }
2274
2275    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
2276    // (if assume is valid at the load)
2277    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
2278      Value *LHS = ICmp->getOperand(0);
2279      Value *RHS = ICmp->getOperand(1);
2280      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
2281          isa<LoadInst>(LHS) &&
2282          isa<Constant>(RHS) &&
2283          RHS->getType()->isPointerTy() &&
2284          cast<Constant>(RHS)->isNullValue()) {
2285        LoadInst* LI = cast<LoadInst>(LHS);
2286        if (isValidAssumeForContext(II, LI, DT)) {
2287          MDNode *MD = MDNode::get(II->getContext(), None);
2288          LI->setMetadata(LLVMContext::MD_nonnull, MD);
2289          return eraseInstFromFunction(*II);
2290        }
2291      }
2292      // TODO: apply nonnull return attributes to calls and invokes
2293      // TODO: apply range metadata for range check patterns?
2294    }
2295    // If there is a dominating assume with the same condition as this one,
2296    // then this one is redundant, and should be removed.
2297    APInt KnownZero(1, 0), KnownOne(1, 0);
2298    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
2299    if (KnownOne.isAllOnesValue())
2300      return eraseInstFromFunction(*II);
2301
2302    break;
2303  }
2304  case Intrinsic::experimental_gc_relocate: {
2305    // Translate facts known about a pointer before relocating into
2306    // facts about the relocate value, while being careful to
2307    // preserve relocation semantics.
2308    Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
2309
2310    // Remove the relocation if unused, note that this check is required
2311    // to prevent the cases below from looping forever.
2312    if (II->use_empty())
2313      return eraseInstFromFunction(*II);
2314
2315    // Undef is undef, even after relocation.
2316    // TODO: provide a hook for this in GCStrategy.  This is clearly legal for
2317    // most practical collectors, but there was discussion in the review thread
2318    // about whether it was legal for all possible collectors.
2319    if (isa<UndefValue>(DerivedPtr))
2320      // Use undef of gc_relocate's type to replace it.
2321      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
2322
2323    if (auto *PT = dyn_cast<PointerType>(II->getType())) {
2324      // The relocation of null will be null for most any collector.
2325      // TODO: provide a hook for this in GCStrategy.  There might be some
2326      // weird collector this property does not hold for.
2327      if (isa<ConstantPointerNull>(DerivedPtr))
2328        // Use null-pointer of gc_relocate's type to replace it.
2329        return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
2330
2331      // isKnownNonNull -> nonnull attribute
2332      if (isKnownNonNullAt(DerivedPtr, II, DT))
2333        II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
2334    }
2335
2336    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
2337    // Canonicalize on the type from the uses to the defs
2338
2339    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
2340    break;
2341  }
2342  }
2343
2344  return visitCallSite(II);
2345}
2346
2347// InvokeInst simplification
2348//
2349Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
2350  return visitCallSite(&II);
2351}
2352
2353/// If this cast does not affect the value passed through the varargs area, we
2354/// can eliminate the use of the cast.
2355static bool isSafeToEliminateVarargsCast(const CallSite CS,
2356                                         const DataLayout &DL,
2357                                         const CastInst *const CI,
2358                                         const int ix) {
2359  if (!CI->isLosslessCast())
2360    return false;
2361
2362  // If this is a GC intrinsic, avoid munging types.  We need types for
2363  // statepoint reconstruction in SelectionDAG.
2364  // TODO: This is probably something which should be expanded to all
2365  // intrinsics since the entire point of intrinsics is that
2366  // they are understandable by the optimizer.
2367  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
2368    return false;
2369
2370  // The size of ByVal or InAlloca arguments is derived from the type, so we
2371  // can't change to a type with a different size.  If the size were
2372  // passed explicitly we could avoid this check.
2373  if (!CS.isByValOrInAllocaArgument(ix))
2374    return true;
2375
2376  Type* SrcTy =
2377            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
2378  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
2379  if (!SrcTy->isSized() || !DstTy->isSized())
2380    return false;
2381  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
2382    return false;
2383  return true;
2384}
2385
2386Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
2387  if (!CI->getCalledFunction()) return nullptr;
2388
2389  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
2390    replaceInstUsesWith(*From, With);
2391  };
2392  LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
2393  if (Value *With = Simplifier.optimizeCall(CI)) {
2394    ++NumSimplified;
2395    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
2396  }
2397
2398  return nullptr;
2399}
2400
2401static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
2402  // Strip off at most one level of pointer casts, looking for an alloca.  This
2403  // is good enough in practice and simpler than handling any number of casts.
2404  Value *Underlying = TrampMem->stripPointerCasts();
2405  if (Underlying != TrampMem &&
2406      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
2407    return nullptr;
2408  if (!isa<AllocaInst>(Underlying))
2409    return nullptr;
2410
2411  IntrinsicInst *InitTrampoline = nullptr;
2412  for (User *U : TrampMem->users()) {
2413    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2414    if (!II)
2415      return nullptr;
2416    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
2417      if (InitTrampoline)
2418        // More than one init_trampoline writes to this value.  Give up.
2419        return nullptr;
2420      InitTrampoline = II;
2421      continue;
2422    }
2423    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
2424      // Allow any number of calls to adjust.trampoline.
2425      continue;
2426    return nullptr;
2427  }
2428
2429  // No call to init.trampoline found.
2430  if (!InitTrampoline)
2431    return nullptr;
2432
2433  // Check that the alloca is being used in the expected way.
2434  if (InitTrampoline->getOperand(0) != TrampMem)
2435    return nullptr;
2436
2437  return InitTrampoline;
2438}
2439
2440static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
2441                                               Value *TrampMem) {
2442  // Visit all the previous instructions in the basic block, and try to find a
2443  // init.trampoline which has a direct path to the adjust.trampoline.
2444  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
2445                            E = AdjustTramp->getParent()->begin();
2446       I != E;) {
2447    Instruction *Inst = &*--I;
2448    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2449      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
2450          II->getOperand(0) == TrampMem)
2451        return II;
2452    if (Inst->mayWriteToMemory())
2453      return nullptr;
2454  }
2455  return nullptr;
2456}
2457
2458// Given a call to llvm.adjust.trampoline, find and return the corresponding
2459// call to llvm.init.trampoline if the call to the trampoline can be optimized
2460// to a direct call to a function.  Otherwise return NULL.
2461//
2462static IntrinsicInst *findInitTrampoline(Value *Callee) {
2463  Callee = Callee->stripPointerCasts();
2464  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
2465  if (!AdjustTramp ||
2466      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
2467    return nullptr;
2468
2469  Value *TrampMem = AdjustTramp->getOperand(0);
2470
2471  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
2472    return IT;
2473  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
2474    return IT;
2475  return nullptr;
2476}
2477
2478/// Improvements for call and invoke instructions.
2479Instruction *InstCombiner::visitCallSite(CallSite CS) {
2480
2481  if (isAllocLikeFn(CS.getInstruction(), TLI))
2482    return visitAllocSite(*CS.getInstruction());
2483
2484  bool Changed = false;
2485
2486  // Mark any parameters that are known to be non-null with the nonnull
2487  // attribute.  This is helpful for inlining calls to functions with null
2488  // checks on their arguments.
2489  SmallVector<unsigned, 4> Indices;
2490  unsigned ArgNo = 0;
2491
2492  for (Value *V : CS.args()) {
2493    if (V->getType()->isPointerTy() &&
2494        !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
2495        isKnownNonNullAt(V, CS.getInstruction(), DT))
2496      Indices.push_back(ArgNo + 1);
2497    ArgNo++;
2498  }
2499
2500  assert(ArgNo == CS.arg_size() && "sanity check");
2501
2502  if (!Indices.empty()) {
2503    AttributeSet AS = CS.getAttributes();
2504    LLVMContext &Ctx = CS.getInstruction()->getContext();
2505    AS = AS.addAttribute(Ctx, Indices,
2506                         Attribute::get(Ctx, Attribute::NonNull));
2507    CS.setAttributes(AS);
2508    Changed = true;
2509  }
2510
2511  // If the callee is a pointer to a function, attempt to move any casts to the
2512  // arguments of the call/invoke.
2513  Value *Callee = CS.getCalledValue();
2514  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
2515    return nullptr;
2516
2517  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
2518    // Remove the convergent attr on calls when the callee is not convergent.
2519    if (CS.isConvergent() && !CalleeF->isConvergent() &&
2520        !CalleeF->isIntrinsic()) {
2521      DEBUG(dbgs() << "Removing convergent attr from instr "
2522                   << CS.getInstruction() << "\n");
2523      CS.setNotConvergent();
2524      return CS.getInstruction();
2525    }
2526
2527    // If the call and callee calling conventions don't match, this call must
2528    // be unreachable, as the call is undefined.
2529    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
2530        // Only do this for calls to a function with a body.  A prototype may
2531        // not actually end up matching the implementation's calling conv for a
2532        // variety of reasons (e.g. it may be written in assembly).
2533        !CalleeF->isDeclaration()) {
2534      Instruction *OldCall = CS.getInstruction();
2535      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
2536                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
2537                                  OldCall);
2538      // If OldCall does not return void then replaceAllUsesWith undef.
2539      // This allows ValueHandlers and custom metadata to adjust itself.
2540      if (!OldCall->getType()->isVoidTy())
2541        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
2542      if (isa<CallInst>(OldCall))
2543        return eraseInstFromFunction(*OldCall);
2544
2545      // We cannot remove an invoke, because it would change the CFG, just
2546      // change the callee to a null pointer.
2547      cast<InvokeInst>(OldCall)->setCalledFunction(
2548                                    Constant::getNullValue(CalleeF->getType()));
2549      return nullptr;
2550    }
2551  }
2552
2553  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
2554    // If CS does not return void then replaceAllUsesWith undef.
2555    // This allows ValueHandlers and custom metadata to adjust itself.
2556    if (!CS.getInstruction()->getType()->isVoidTy())
2557      replaceInstUsesWith(*CS.getInstruction(),
2558                          UndefValue::get(CS.getInstruction()->getType()));
2559
2560    if (isa<InvokeInst>(CS.getInstruction())) {
2561      // Can't remove an invoke because we cannot change the CFG.
2562      return nullptr;
2563    }
2564
2565    // This instruction is not reachable, just remove it.  We insert a store to
2566    // undef so that we know that this code is not reachable, despite the fact
2567    // that we can't modify the CFG here.
2568    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
2569                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
2570                  CS.getInstruction());
2571
2572    return eraseInstFromFunction(*CS.getInstruction());
2573  }
2574
2575  if (IntrinsicInst *II = findInitTrampoline(Callee))
2576    return transformCallThroughTrampoline(CS, II);
2577
2578  PointerType *PTy = cast<PointerType>(Callee->getType());
2579  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2580  if (FTy->isVarArg()) {
2581    int ix = FTy->getNumParams();
2582    // See if we can optimize any arguments passed through the varargs area of
2583    // the call.
2584    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
2585           E = CS.arg_end(); I != E; ++I, ++ix) {
2586      CastInst *CI = dyn_cast<CastInst>(*I);
2587      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
2588        *I = CI->getOperand(0);
2589        Changed = true;
2590      }
2591    }
2592  }
2593
2594  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
2595    // Inline asm calls cannot throw - mark them 'nounwind'.
2596    CS.setDoesNotThrow();
2597    Changed = true;
2598  }
2599
2600  // Try to optimize the call if possible, we require DataLayout for most of
2601  // this.  None of these calls are seen as possibly dead so go ahead and
2602  // delete the instruction now.
2603  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
2604    Instruction *I = tryOptimizeCall(CI);
2605    // If we changed something return the result, etc. Otherwise let
2606    // the fallthrough check.
2607    if (I) return eraseInstFromFunction(*I);
2608  }
2609
2610  return Changed ? CS.getInstruction() : nullptr;
2611}
2612
2613/// If the callee is a constexpr cast of a function, attempt to move the cast to
2614/// the arguments of the call/invoke.
2615bool InstCombiner::transformConstExprCastCall(CallSite CS) {
2616  Function *Callee =
2617    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2618  if (!Callee)
2619    return false;
2620  // The prototype of thunks are a lie, don't try to directly call such
2621  // functions.
2622  if (Callee->hasFnAttribute("thunk"))
2623    return false;
2624  Instruction *Caller = CS.getInstruction();
2625  const AttributeSet &CallerPAL = CS.getAttributes();
2626
2627  // Okay, this is a cast from a function to a different type.  Unless doing so
2628  // would cause a type conversion of one of our arguments, change this call to
2629  // be a direct call with arguments casted to the appropriate types.
2630  //
2631  FunctionType *FT = Callee->getFunctionType();
2632  Type *OldRetTy = Caller->getType();
2633  Type *NewRetTy = FT->getReturnType();
2634
2635  // Check to see if we are changing the return type...
2636  if (OldRetTy != NewRetTy) {
2637
2638    if (NewRetTy->isStructTy())
2639      return false; // TODO: Handle multiple return values.
2640
2641    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
2642      if (Callee->isDeclaration())
2643        return false;   // Cannot transform this return value.
2644
2645      if (!Caller->use_empty() &&
2646          // void -> non-void is handled specially
2647          !NewRetTy->isVoidTy())
2648        return false;   // Cannot transform this return value.
2649    }
2650
2651    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
2652      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
2653      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
2654        return false;   // Attribute not compatible with transformed value.
2655    }
2656
2657    // If the callsite is an invoke instruction, and the return value is used by
2658    // a PHI node in a successor, we cannot change the return type of the call
2659    // because there is no place to put the cast instruction (without breaking
2660    // the critical edge).  Bail out in this case.
2661    if (!Caller->use_empty())
2662      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
2663        for (User *U : II->users())
2664          if (PHINode *PN = dyn_cast<PHINode>(U))
2665            if (PN->getParent() == II->getNormalDest() ||
2666                PN->getParent() == II->getUnwindDest())
2667              return false;
2668  }
2669
2670  unsigned NumActualArgs = CS.arg_size();
2671  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
2672
2673  // Prevent us turning:
2674  // declare void @takes_i32_inalloca(i32* inalloca)
2675  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
2676  //
2677  // into:
2678  //  call void @takes_i32_inalloca(i32* null)
2679  //
2680  //  Similarly, avoid folding away bitcasts of byval calls.
2681  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
2682      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
2683    return false;
2684
2685  CallSite::arg_iterator AI = CS.arg_begin();
2686  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
2687    Type *ParamTy = FT->getParamType(i);
2688    Type *ActTy = (*AI)->getType();
2689
2690    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
2691      return false;   // Cannot transform this parameter value.
2692
2693    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
2694          overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
2695      return false;   // Attribute not compatible with transformed value.
2696
2697    if (CS.isInAllocaArgument(i))
2698      return false;   // Cannot transform to and from inalloca.
2699
2700    // If the parameter is passed as a byval argument, then we have to have a
2701    // sized type and the sized type has to have the same size as the old type.
2702    if (ParamTy != ActTy &&
2703        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
2704                                                         Attribute::ByVal)) {
2705      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
2706      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
2707        return false;
2708
2709      Type *CurElTy = ActTy->getPointerElementType();
2710      if (DL.getTypeAllocSize(CurElTy) !=
2711          DL.getTypeAllocSize(ParamPTy->getElementType()))
2712        return false;
2713    }
2714  }
2715
2716  if (Callee->isDeclaration()) {
2717    // Do not delete arguments unless we have a function body.
2718    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
2719      return false;
2720
2721    // If the callee is just a declaration, don't change the varargsness of the
2722    // call.  We don't want to introduce a varargs call where one doesn't
2723    // already exist.
2724    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
2725    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
2726      return false;
2727
2728    // If both the callee and the cast type are varargs, we still have to make
2729    // sure the number of fixed parameters are the same or we have the same
2730    // ABI issues as if we introduce a varargs call.
2731    if (FT->isVarArg() &&
2732        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
2733        FT->getNumParams() !=
2734        cast<FunctionType>(APTy->getElementType())->getNumParams())
2735      return false;
2736  }
2737
2738  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
2739      !CallerPAL.isEmpty())
2740    // In this case we have more arguments than the new function type, but we
2741    // won't be dropping them.  Check that these extra arguments have attributes
2742    // that are compatible with being a vararg call argument.
2743    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
2744      unsigned Index = CallerPAL.getSlotIndex(i - 1);
2745      if (Index <= FT->getNumParams())
2746        break;
2747
2748      // Check if it has an attribute that's incompatible with varargs.
2749      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
2750      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
2751        return false;
2752    }
2753
2754
2755  // Okay, we decided that this is a safe thing to do: go ahead and start
2756  // inserting cast instructions as necessary.
2757  std::vector<Value*> Args;
2758  Args.reserve(NumActualArgs);
2759  SmallVector<AttributeSet, 8> attrVec;
2760  attrVec.reserve(NumCommonArgs);
2761
2762  // Get any return attributes.
2763  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
2764
2765  // If the return value is not being used, the type may not be compatible
2766  // with the existing attributes.  Wipe out any problematic attributes.
2767  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
2768
2769  // Add the new return attributes.
2770  if (RAttrs.hasAttributes())
2771    attrVec.push_back(AttributeSet::get(Caller->getContext(),
2772                                        AttributeSet::ReturnIndex, RAttrs));
2773
2774  AI = CS.arg_begin();
2775  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
2776    Type *ParamTy = FT->getParamType(i);
2777
2778    if ((*AI)->getType() == ParamTy) {
2779      Args.push_back(*AI);
2780    } else {
2781      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
2782    }
2783
2784    // Add any parameter attributes.
2785    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2786    if (PAttrs.hasAttributes())
2787      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
2788                                          PAttrs));
2789  }
2790
2791  // If the function takes more arguments than the call was taking, add them
2792  // now.
2793  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
2794    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
2795
2796  // If we are removing arguments to the function, emit an obnoxious warning.
2797  if (FT->getNumParams() < NumActualArgs) {
2798    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
2799    if (FT->isVarArg()) {
2800      // Add all of the arguments in their promoted form to the arg list.
2801      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
2802        Type *PTy = getPromotedType((*AI)->getType());
2803        if (PTy != (*AI)->getType()) {
2804          // Must promote to pass through va_arg area!
2805          Instruction::CastOps opcode =
2806            CastInst::getCastOpcode(*AI, false, PTy, false);
2807          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
2808        } else {
2809          Args.push_back(*AI);
2810        }
2811
2812        // Add any parameter attributes.
2813        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2814        if (PAttrs.hasAttributes())
2815          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
2816                                              PAttrs));
2817      }
2818    }
2819  }
2820
2821  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
2822  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
2823    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
2824
2825  if (NewRetTy->isVoidTy())
2826    Caller->setName("");   // Void type should not have a name.
2827
2828  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
2829                                                       attrVec);
2830
2831  SmallVector<OperandBundleDef, 1> OpBundles;
2832  CS.getOperandBundlesAsDefs(OpBundles);
2833
2834  Instruction *NC;
2835  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2836    NC = Builder->CreateInvoke(Callee, II->getNormalDest(), II->getUnwindDest(),
2837                               Args, OpBundles);
2838    NC->takeName(II);
2839    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
2840    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
2841  } else {
2842    CallInst *CI = cast<CallInst>(Caller);
2843    NC = Builder->CreateCall(Callee, Args, OpBundles);
2844    NC->takeName(CI);
2845    if (CI->isTailCall())
2846      cast<CallInst>(NC)->setTailCall();
2847    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
2848    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
2849  }
2850
2851  // Insert a cast of the return type as necessary.
2852  Value *NV = NC;
2853  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
2854    if (!NV->getType()->isVoidTy()) {
2855      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
2856      NC->setDebugLoc(Caller->getDebugLoc());
2857
2858      // If this is an invoke instruction, we should insert it after the first
2859      // non-phi, instruction in the normal successor block.
2860      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2861        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
2862        InsertNewInstBefore(NC, *I);
2863      } else {
2864        // Otherwise, it's a call, just insert cast right after the call.
2865        InsertNewInstBefore(NC, *Caller);
2866      }
2867      Worklist.AddUsersToWorkList(*Caller);
2868    } else {
2869      NV = UndefValue::get(Caller->getType());
2870    }
2871  }
2872
2873  if (!Caller->use_empty())
2874    replaceInstUsesWith(*Caller, NV);
2875  else if (Caller->hasValueHandle()) {
2876    if (OldRetTy == NV->getType())
2877      ValueHandleBase::ValueIsRAUWd(Caller, NV);
2878    else
2879      // We cannot call ValueIsRAUWd with a different type, and the
2880      // actual tracked value will disappear.
2881      ValueHandleBase::ValueIsDeleted(Caller);
2882  }
2883
2884  eraseInstFromFunction(*Caller);
2885  return true;
2886}
2887
2888/// Turn a call to a function created by init_trampoline / adjust_trampoline
2889/// intrinsic pair into a direct call to the underlying function.
2890Instruction *
2891InstCombiner::transformCallThroughTrampoline(CallSite CS,
2892                                             IntrinsicInst *Tramp) {
2893  Value *Callee = CS.getCalledValue();
2894  PointerType *PTy = cast<PointerType>(Callee->getType());
2895  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2896  const AttributeSet &Attrs = CS.getAttributes();
2897
2898  // If the call already has the 'nest' attribute somewhere then give up -
2899  // otherwise 'nest' would occur twice after splicing in the chain.
2900  if (Attrs.hasAttrSomewhere(Attribute::Nest))
2901    return nullptr;
2902
2903  assert(Tramp &&
2904         "transformCallThroughTrampoline called with incorrect CallSite.");
2905
2906  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
2907  FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());
2908
2909  const AttributeSet &NestAttrs = NestF->getAttributes();
2910  if (!NestAttrs.isEmpty()) {
2911    unsigned NestIdx = 1;
2912    Type *NestTy = nullptr;
2913    AttributeSet NestAttr;
2914
2915    // Look for a parameter marked with the 'nest' attribute.
2916    for (FunctionType::param_iterator I = NestFTy->param_begin(),
2917         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
2918      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
2919        // Record the parameter type and any other attributes.
2920        NestTy = *I;
2921        NestAttr = NestAttrs.getParamAttributes(NestIdx);
2922        break;
2923      }
2924
2925    if (NestTy) {
2926      Instruction *Caller = CS.getInstruction();
2927      std::vector<Value*> NewArgs;
2928      NewArgs.reserve(CS.arg_size() + 1);
2929
2930      SmallVector<AttributeSet, 8> NewAttrs;
2931      NewAttrs.reserve(Attrs.getNumSlots() + 1);
2932
2933      // Insert the nest argument into the call argument list, which may
2934      // mean appending it.  Likewise for attributes.
2935
2936      // Add any result attributes.
2937      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
2938        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2939                                             Attrs.getRetAttributes()));
2940
2941      {
2942        unsigned Idx = 1;
2943        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
2944        do {
2945          if (Idx == NestIdx) {
2946            // Add the chain argument and attributes.
2947            Value *NestVal = Tramp->getArgOperand(2);
2948            if (NestVal->getType() != NestTy)
2949              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
2950            NewArgs.push_back(NestVal);
2951            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2952                                                 NestAttr));
2953          }
2954
2955          if (I == E)
2956            break;
2957
2958          // Add the original argument and attributes.
2959          NewArgs.push_back(*I);
2960          AttributeSet Attr = Attrs.getParamAttributes(Idx);
2961          if (Attr.hasAttributes(Idx)) {
2962            AttrBuilder B(Attr, Idx);
2963            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2964                                                 Idx + (Idx >= NestIdx), B));
2965          }
2966
2967          ++Idx;
2968          ++I;
2969        } while (1);
2970      }
2971
2972      // Add any function attributes.
2973      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
2974        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
2975                                             Attrs.getFnAttributes()));
2976
2977      // The trampoline may have been bitcast to a bogus type (FTy).
2978      // Handle this by synthesizing a new function type, equal to FTy
2979      // with the chain parameter inserted.
2980
2981      std::vector<Type*> NewTypes;
2982      NewTypes.reserve(FTy->getNumParams()+1);
2983
2984      // Insert the chain's type into the list of parameter types, which may
2985      // mean appending it.
2986      {
2987        unsigned Idx = 1;
2988        FunctionType::param_iterator I = FTy->param_begin(),
2989          E = FTy->param_end();
2990
2991        do {
2992          if (Idx == NestIdx)
2993            // Add the chain's type.
2994            NewTypes.push_back(NestTy);
2995
2996          if (I == E)
2997            break;
2998
2999          // Add the original type.
3000          NewTypes.push_back(*I);
3001
3002          ++Idx;
3003          ++I;
3004        } while (1);
3005      }
3006
3007      // Replace the trampoline call with a direct call.  Let the generic
3008      // code sort out any function type mismatches.
3009      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
3010                                                FTy->isVarArg());
3011      Constant *NewCallee =
3012        NestF->getType() == PointerType::getUnqual(NewFTy) ?
3013        NestF : ConstantExpr::getBitCast(NestF,
3014                                         PointerType::getUnqual(NewFTy));
3015      const AttributeSet &NewPAL =
3016          AttributeSet::get(FTy->getContext(), NewAttrs);
3017
3018      SmallVector<OperandBundleDef, 1> OpBundles;
3019      CS.getOperandBundlesAsDefs(OpBundles);
3020
3021      Instruction *NewCaller;
3022      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3023        NewCaller = InvokeInst::Create(NewCallee,
3024                                       II->getNormalDest(), II->getUnwindDest(),
3025                                       NewArgs, OpBundles);
3026        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
3027        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
3028      } else {
3029        NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
3030        if (cast<CallInst>(Caller)->isTailCall())
3031          cast<CallInst>(NewCaller)->setTailCall();
3032        cast<CallInst>(NewCaller)->
3033          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
3034        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
3035      }
3036
3037      return NewCaller;
3038    }
3039  }
3040
3041  // Replace the trampoline call with a direct call.  Since there is no 'nest'
3042  // parameter, there is no need to adjust the argument list.  Let the generic
3043  // code sort out any function type mismatches.
3044  Constant *NewCallee =
3045    NestF->getType() == PTy ? NestF :
3046                              ConstantExpr::getBitCast(NestF, PTy);
3047  CS.setCalledFunction(NewCallee);
3048  return CS.getInstruction();
3049}
3050