CGBuiltin.cpp revision 09df2b066221d869f17f4b5762405f111a65f983
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
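
// Illustrative sketch (comment only): assuming a 32-bit operand, a call like
//   __sync_fetch_and_add(p, v);
// comes through the helper above as roughly
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
// with %old, the prior contents of memory, as the result.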

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
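
// Illustrative sketch (comment only): assuming a 32-bit operand, a call like
//   __sync_add_and_fetch(p, v);
// comes through here as roughly
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
//   %new = add i32 %old, %v
// with %new as the result, since op-and-fetch forms yield the updated value.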

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
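
// Illustrative sketch (comment only): with Intrinsic::uadd_with_overflow and
// i32 operands, this emits roughly
//   %pair = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum  = extractvalue {i32, i1} %pair, 0
//   %ovf  = extractvalue {i32, i1} %pair, 1
// returning %sum and passing %ovf back through the Carry out-parameter.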

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
    Builder.CreateICmpSGE(ArgValue,
                          llvm::Constant::getNullValue(ArgValue->getType()),
                                                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
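    // e.g. (illustrative): ffs(8) == 4, since cttz(8) == 3; ffs(0) == 0.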
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
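    // e.g. (illustrative): parity(7) == 1 (three set bits); parity(6) == 0.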
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
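    // e.g. (illustrative): __builtin___memcpy_chk(d, s, 16, 32) becomes a
    // plain 16-byte memcpy, since the copy provably fits the destination.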
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
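    // A sketch of the slot layout used below (in void* units): buf[0] holds
    // the frame pointer and buf[2] holds the saved stack pointer; the
    // remaining slot is left to LLVM's EH setjmp lowering.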
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
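    // Illustrative sketch (comment only): for a 32-bit operand this emits
    // roughly
    //   %old = cmpxchg i32* %p, i32 %expected, i32 %desired seq_cst
    // and the builtin returns %old, the value the memory previously held.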
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
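    // Illustrative sketch (comment only): the same cmpxchg as the val_ form,
    // followed by
    //   %ok = icmp eq i32 %old, %expected
    // so the builtin returns whether the exchange actually took place.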
974    QualType T = E->getArg(1)->getType();
975    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
976    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
977
978    llvm::IntegerType *IntType =
979      llvm::IntegerType::get(getLLVMContext(),
980                             getContext().getTypeSize(T));
981    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
982
983    Value *Args[3];
984    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
985    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
986    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
987
988    Value *OldVal = Args[1];
989    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
990                                                 llvm::SequentiallyConsistent);
991    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
992    // zext bool to int.
993    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
994    return RValue::get(Result);
995  }
996
997  case Builtin::BI__sync_swap_1:
998  case Builtin::BI__sync_swap_2:
999  case Builtin::BI__sync_swap_4:
1000  case Builtin::BI__sync_swap_8:
1001  case Builtin::BI__sync_swap_16:
1002    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
1003
1004  case Builtin::BI__sync_lock_test_and_set_1:
1005  case Builtin::BI__sync_lock_test_and_set_2:
1006  case Builtin::BI__sync_lock_test_and_set_4:
1007  case Builtin::BI__sync_lock_test_and_set_8:
1008  case Builtin::BI__sync_lock_test_and_set_16:
1009    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
1010
1011  case Builtin::BI__sync_lock_release_1:
1012  case Builtin::BI__sync_lock_release_2:
1013  case Builtin::BI__sync_lock_release_4:
1014  case Builtin::BI__sync_lock_release_8:
1015  case Builtin::BI__sync_lock_release_16: {
1016    Value *Ptr = EmitScalarExpr(E->getArg(0));
1017    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
1018    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
1019    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
1020                                             StoreSize.getQuantity() * 8);
1021    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
1022    llvm::StoreInst *Store =
1023      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
1024    Store->setAlignment(StoreSize.getQuantity());
1025    Store->setAtomic(llvm::Release);
1026    return RValue::get(0);
1027  }
1028
1029  case Builtin::BI__sync_synchronize: {
1030    // We assume this is supposed to correspond to a C++0x-style
1031    // sequentially-consistent fence (i.e. this is only usable for
1032    // synchonization, not device I/O or anything like that). This intrinsic
1033    // is really badly designed in the sense that in theory, there isn't
1034    // any way to safely use it... but in practice, it mostly works
1035    // to use it with non-atomic loads and stores to get acquire/release
1036    // semantics.
1037    Builder.CreateFence(llvm::SequentiallyConsistent);
1038    return RValue::get(0);
1039  }
1040
1041  case Builtin::BI__c11_atomic_is_lock_free:
1042  case Builtin::BI__atomic_is_lock_free: {
1043    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
1044    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
1045    // _Atomic(T) is always properly-aligned.
1046    const char *LibCallName = "__atomic_is_lock_free";
1047    CallArgList Args;
1048    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
1049             getContext().getSizeType());
1050    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
1051      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
1052               getContext().VoidPtrTy);
1053    else
1054      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
1055               getContext().VoidPtrTy);
1056    const CGFunctionInfo &FuncInfo =
1057        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
1058                                               FunctionType::ExtInfo(),
1059                                               RequiredArgs::All);
1060    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
1061    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
1062    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
1063  }
1064
1065  case Builtin::BI__atomic_test_and_set: {
1066    // Look at the argument type to determine whether this is a volatile
1067    // operation. The parameter type is always volatile.
1068    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
1069    bool Volatile =
1070        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
1071
1072    Value *Ptr = EmitScalarExpr(E->getArg(0));
1073    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
1074    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
1075    Value *NewVal = Builder.getInt8(1);
1076    Value *Order = EmitScalarExpr(E->getArg(1));
1077    if (isa<llvm::ConstantInt>(Order)) {
1078      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1079      AtomicRMWInst *Result = 0;
1080      switch (ord) {
1081      case 0:  // memory_order_relaxed
1082      default: // invalid order
1083        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1084                                         Ptr, NewVal,
1085                                         llvm::Monotonic);
1086        break;
1087      case 1:  // memory_order_consume
1088      case 2:  // memory_order_acquire
1089        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1090                                         Ptr, NewVal,
1091                                         llvm::Acquire);
1092        break;
1093      case 3:  // memory_order_release
1094        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1095                                         Ptr, NewVal,
1096                                         llvm::Release);
1097        break;
1098      case 4:  // memory_order_acq_rel
1099        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1100                                         Ptr, NewVal,
1101                                         llvm::AcquireRelease);
1102        break;
1103      case 5:  // memory_order_seq_cst
1104        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1105                                         Ptr, NewVal,
1106                                         llvm::SequentiallyConsistent);
1107        break;
1108      }
1109      Result->setVolatile(Volatile);
1110      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
1111    }
1112
1113    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1114
1115    llvm::BasicBlock *BBs[5] = {
1116      createBasicBlock("monotonic", CurFn),
1117      createBasicBlock("acquire", CurFn),
1118      createBasicBlock("release", CurFn),
1119      createBasicBlock("acqrel", CurFn),
1120      createBasicBlock("seqcst", CurFn)
1121    };
1122    llvm::AtomicOrdering Orders[5] = {
1123      llvm::Monotonic, llvm::Acquire, llvm::Release,
1124      llvm::AcquireRelease, llvm::SequentiallyConsistent
1125    };
1126
1127    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1128    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
1129
1130    Builder.SetInsertPoint(ContBB);
1131    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
1132
1133    for (unsigned i = 0; i < 5; ++i) {
1134      Builder.SetInsertPoint(BBs[i]);
1135      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1136                                                   Ptr, NewVal, Orders[i]);
1137      RMW->setVolatile(Volatile);
1138      Result->addIncoming(RMW, BBs[i]);
1139      Builder.CreateBr(ContBB);
1140    }
1141
1142    SI->addCase(Builder.getInt32(0), BBs[0]);
1143    SI->addCase(Builder.getInt32(1), BBs[1]);
1144    SI->addCase(Builder.getInt32(2), BBs[1]);
1145    SI->addCase(Builder.getInt32(3), BBs[2]);
1146    SI->addCase(Builder.getInt32(4), BBs[3]);
1147    SI->addCase(Builder.getInt32(5), BBs[4]);
1148
1149    Builder.SetInsertPoint(ContBB);
1150    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
1151  }
1152
1153  case Builtin::BI__atomic_clear: {
1154    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
1155    bool Volatile =
1156        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
1157
1158    Value *Ptr = EmitScalarExpr(E->getArg(0));
1159    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
1160    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
1161    Value *NewVal = Builder.getInt8(0);
1162    Value *Order = EmitScalarExpr(E->getArg(1));
1163    if (isa<llvm::ConstantInt>(Order)) {
1164      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1165      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
1166      Store->setAlignment(1);
1167      switch (ord) {
1168      case 0:  // memory_order_relaxed
1169      default: // invalid order
1170        Store->setOrdering(llvm::Monotonic);
1171        break;
1172      case 3:  // memory_order_release
1173        Store->setOrdering(llvm::Release);
1174        break;
1175      case 5:  // memory_order_seq_cst
1176        Store->setOrdering(llvm::SequentiallyConsistent);
1177        break;
1178      }
1179      return RValue::get(0);
1180    }
1181
1182    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1183
1184    llvm::BasicBlock *BBs[3] = {
1185      createBasicBlock("monotonic", CurFn),
1186      createBasicBlock("release", CurFn),
1187      createBasicBlock("seqcst", CurFn)
1188    };
1189    llvm::AtomicOrdering Orders[3] = {
1190      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
1191    };
1192
1193    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1194    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
1195
1196    for (unsigned i = 0; i < 3; ++i) {
1197      Builder.SetInsertPoint(BBs[i]);
1198      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
1199      Store->setAlignment(1);
1200      Store->setOrdering(Orders[i]);
1201      Builder.CreateBr(ContBB);
1202    }
1203
1204    SI->addCase(Builder.getInt32(0), BBs[0]);
1205    SI->addCase(Builder.getInt32(3), BBs[1]);
1206    SI->addCase(Builder.getInt32(5), BBs[2]);
1207
1208    Builder.SetInsertPoint(ContBB);
1209    return RValue::get(0);
1210  }
1211
1212  case Builtin::BI__atomic_thread_fence:
1213  case Builtin::BI__atomic_signal_fence:
1214  case Builtin::BI__c11_atomic_thread_fence:
1215  case Builtin::BI__c11_atomic_signal_fence: {
1216    llvm::SynchronizationScope Scope;
1217    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
1218        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
1219      Scope = llvm::SingleThread;
1220    else
1221      Scope = llvm::CrossThread;
1222    Value *Order = EmitScalarExpr(E->getArg(0));
1223    if (isa<llvm::ConstantInt>(Order)) {
1224      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1225      switch (ord) {
1226      case 0:  // memory_order_relaxed
1227      default: // invalid order
1228        break;
1229      case 1:  // memory_order_consume
1230      case 2:  // memory_order_acquire
1231        Builder.CreateFence(llvm::Acquire, Scope);
1232        break;
1233      case 3:  // memory_order_release
1234        Builder.CreateFence(llvm::Release, Scope);
1235        break;
1236      case 4:  // memory_order_acq_rel
1237        Builder.CreateFence(llvm::AcquireRelease, Scope);
1238        break;
1239      case 5:  // memory_order_seq_cst
1240        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
1241        break;
1242      }
1243      return RValue::get(0);
1244    }
1245
1246    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
1247    AcquireBB = createBasicBlock("acquire", CurFn);
1248    ReleaseBB = createBasicBlock("release", CurFn);
1249    AcqRelBB = createBasicBlock("acqrel", CurFn);
1250    SeqCstBB = createBasicBlock("seqcst", CurFn);
1251    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1252
1253    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1254    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
1255
1256    Builder.SetInsertPoint(AcquireBB);
1257    Builder.CreateFence(llvm::Acquire, Scope);
1258    Builder.CreateBr(ContBB);
1259    SI->addCase(Builder.getInt32(1), AcquireBB);
1260    SI->addCase(Builder.getInt32(2), AcquireBB);
1261
1262    Builder.SetInsertPoint(ReleaseBB);
1263    Builder.CreateFence(llvm::Release, Scope);
1264    Builder.CreateBr(ContBB);
1265    SI->addCase(Builder.getInt32(3), ReleaseBB);
1266
1267    Builder.SetInsertPoint(AcqRelBB);
1268    Builder.CreateFence(llvm::AcquireRelease, Scope);
1269    Builder.CreateBr(ContBB);
1270    SI->addCase(Builder.getInt32(4), AcqRelBB);
1271
1272    Builder.SetInsertPoint(SeqCstBB);
1273    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
1274    Builder.CreateBr(ContBB);
1275    SI->addCase(Builder.getInt32(5), SeqCstBB);
1276
1277    Builder.SetInsertPoint(ContBB);
1278    return RValue::get(0);
1279  }
1280
1281    // Library functions with special handling.
1282  case Builtin::BIsqrt:
1283  case Builtin::BIsqrtf:
1284  case Builtin::BIsqrtl: {
1285    // TODO: there is currently no set of optimizer flags
1286    // sufficient for us to rewrite sqrt to @llvm.sqrt.
1287    // -fmath-errno=0 is not good enough; we need finiteness.
1288    // We could probably precondition the call with an ult
1289    // against 0, but is that worth the complexity?
1290    break;
1291  }
1292
1293  case Builtin::BIpow:
1294  case Builtin::BIpowf:
1295  case Builtin::BIpowl: {
1296    // Transform a call to pow* into a @llvm.pow.* intrinsic call, but only
1297    // if the target agrees.
1298    if (getTargetHooks().emitIntrinsicForPow()) {
1299      if (!FD->hasAttr<ConstAttr>())
1300        break;
1301      Value *Base = EmitScalarExpr(E->getArg(0));
1302      Value *Exponent = EmitScalarExpr(E->getArg(1));
1303      llvm::Type *ArgType = Base->getType();
1304      Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
1305      return RValue::get(Builder.CreateCall2(F, Base, Exponent));
1306    }
1307    break;
1308  }
1309
1310  case Builtin::BIfma:
1311  case Builtin::BIfmaf:
1312  case Builtin::BIfmal:
1313  case Builtin::BI__builtin_fma:
1314  case Builtin::BI__builtin_fmaf:
1315  case Builtin::BI__builtin_fmal: {
1316    // Rewrite fma to intrinsic.
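    // E.g. (a sketch) fma(a, b, c) becomes
    //   %r = call double @llvm.fma.f64(double %a, double %b, double %c)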
1317    Value *FirstArg = EmitScalarExpr(E->getArg(0));
1318    llvm::Type *ArgType = FirstArg->getType();
1319    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
1320    return RValue::get(Builder.CreateCall3(F, FirstArg,
1321                                              EmitScalarExpr(E->getArg(1)),
1322                                              EmitScalarExpr(E->getArg(2))));
1323  }
1324
1325  case Builtin::BI__builtin_signbit:
1326  case Builtin::BI__builtin_signbitf:
1327  case Builtin::BI__builtin_signbitl: {
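    // A sketch of the lowering for float: reinterpret the bits as an integer
    // and test the sign bit, e.g.
    //   %i = bitcast float %x to i32
    //   %s = icmp slt i32 %i, 0
    //   %r = zext i1 %s to i32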
1328    LLVMContext &C = CGM.getLLVMContext();
1329
1330    Value *Arg = EmitScalarExpr(E->getArg(0));
1331    llvm::Type *ArgTy = Arg->getType();
1332    if (ArgTy->isPPC_FP128Ty())
1333      break; // FIXME: I'm not sure what the right implementation is here.
1334    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
1335    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
1336    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
1337    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
1338    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
1339    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
1340  }
1341  case Builtin::BI__builtin_annotation: {
1342    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
1343    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
1344                                      AnnVal->getType());
1345
1346    // Get the annotation string, looking through casts. Sema requires this to
1347    // be a non-wide string literal, potentially cast, so the cast<> is safe.
1348    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
1349    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
1350    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
1351  }
1352  case Builtin::BI__builtin_addcb:
1353  case Builtin::BI__builtin_addcs:
1354  case Builtin::BI__builtin_addc:
1355  case Builtin::BI__builtin_addcl:
1356  case Builtin::BI__builtin_addcll:
1357  case Builtin::BI__builtin_subcb:
1358  case Builtin::BI__builtin_subcs:
1359  case Builtin::BI__builtin_subc:
1360  case Builtin::BI__builtin_subcl:
1361  case Builtin::BI__builtin_subcll: {
1362
1363    // We translate all of these builtins from expressions of the form:
1364    //   int x = ..., y = ..., carryin = ..., carryout, result;
1365    //   result = __builtin_addc(x, y, carryin, &carryout);
1366    //
1367    // to LLVM IR of the form:
1368    //
1369    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
1370    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
1371    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
1372    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
1373    //                                                       i32 %carryin)
1374    //   %result = extractvalue {i32, i1} %tmp2, 0
1375    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
1376    //   %tmp3 = or i1 %carry1, %carry2
1377    //   %tmp4 = zext i1 %tmp3 to i32
1378    //   store i32 %tmp4, i32* %carryout
1379
1380    // Scalarize our inputs.
1381    llvm::Value *X = EmitScalarExpr(E->getArg(0));
1382    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
1383    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
1384    std::pair<llvm::Value*, unsigned> CarryOutPtr =
1385      EmitPointerWithAlignment(E->getArg(3));
1386
1387    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
1388    llvm::Intrinsic::ID IntrinsicId;
1389    switch (BuiltinID) {
1390    default: llvm_unreachable("Unknown multiprecision builtin id.");
1391    case Builtin::BI__builtin_addcb:
1392    case Builtin::BI__builtin_addcs:
1393    case Builtin::BI__builtin_addc:
1394    case Builtin::BI__builtin_addcl:
1395    case Builtin::BI__builtin_addcll:
1396      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
1397      break;
1398    case Builtin::BI__builtin_subcb:
1399    case Builtin::BI__builtin_subcs:
1400    case Builtin::BI__builtin_subc:
1401    case Builtin::BI__builtin_subcl:
1402    case Builtin::BI__builtin_subcll:
1403      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
1404      break;
1405    }
1406
1407    // Construct our resulting LLVM IR expression.
1408    llvm::Value *Carry1;
1409    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
1410                                              X, Y, Carry1);
1411    llvm::Value *Carry2;
1412    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
1413                                              Sum1, Carryin, Carry2);
1414    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
1415                                               X->getType());
1416    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
1417                                                         CarryOutPtr.first);
1418    CarryOutStore->setAlignment(CarryOutPtr.second);
1419    return RValue::get(Sum2);
1420  }
1421  case Builtin::BI__builtin_uadd_overflow:
1422  case Builtin::BI__builtin_uaddl_overflow:
1423  case Builtin::BI__builtin_uaddll_overflow:
1424  case Builtin::BI__builtin_usub_overflow:
1425  case Builtin::BI__builtin_usubl_overflow:
1426  case Builtin::BI__builtin_usubll_overflow:
1427  case Builtin::BI__builtin_umul_overflow:
1428  case Builtin::BI__builtin_umull_overflow:
1429  case Builtin::BI__builtin_umulll_overflow:
1430  case Builtin::BI__builtin_sadd_overflow:
1431  case Builtin::BI__builtin_saddl_overflow:
1432  case Builtin::BI__builtin_saddll_overflow:
1433  case Builtin::BI__builtin_ssub_overflow:
1434  case Builtin::BI__builtin_ssubl_overflow:
1435  case Builtin::BI__builtin_ssubll_overflow:
1436  case Builtin::BI__builtin_smul_overflow:
1437  case Builtin::BI__builtin_smull_overflow:
1438  case Builtin::BI__builtin_smulll_overflow: {
1439
1440    // We translate all of these builtins directly to the relevant LLVM IR node.
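    // For example (a sketch):
    //   int sum; bool ov = __builtin_sadd_overflow(x, y, &sum);
    // becomes roughly:
    //   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
    //   %sum = extractvalue {i32, i1} %t, 0
    //   store i32 %sum, i32* %sumptr
    //   %ov = extractvalue {i32, i1} %t, 1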
1441
1442    // Scalarize our inputs.
1443    llvm::Value *X = EmitScalarExpr(E->getArg(0));
1444    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
1445    std::pair<llvm::Value *, unsigned> SumOutPtr =
1446      EmitPointerWithAlignment(E->getArg(2));
1447
1448    // Decide which of the overflow intrinsics we are lowering to:
1449    llvm::Intrinsic::ID IntrinsicId;
1450    switch (BuiltinID) {
1451    default: llvm_unreachable("Unknown overflow builtin id.");
1452    case Builtin::BI__builtin_uadd_overflow:
1453    case Builtin::BI__builtin_uaddl_overflow:
1454    case Builtin::BI__builtin_uaddll_overflow:
1455      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
1456      break;
1457    case Builtin::BI__builtin_usub_overflow:
1458    case Builtin::BI__builtin_usubl_overflow:
1459    case Builtin::BI__builtin_usubll_overflow:
1460      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
1461      break;
1462    case Builtin::BI__builtin_umul_overflow:
1463    case Builtin::BI__builtin_umull_overflow:
1464    case Builtin::BI__builtin_umulll_overflow:
1465      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
1466      break;
1467    case Builtin::BI__builtin_sadd_overflow:
1468    case Builtin::BI__builtin_saddl_overflow:
1469    case Builtin::BI__builtin_saddll_overflow:
1470      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
1471      break;
1472    case Builtin::BI__builtin_ssub_overflow:
1473    case Builtin::BI__builtin_ssubl_overflow:
1474    case Builtin::BI__builtin_ssubll_overflow:
1475      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
1476      break;
1477    case Builtin::BI__builtin_smul_overflow:
1478    case Builtin::BI__builtin_smull_overflow:
1479    case Builtin::BI__builtin_smulll_overflow:
1480      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
1481      break;
1482    }
1483
1484
1485    llvm::Value *Carry;
1486    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
1487    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
1488    SumOutStore->setAlignment(SumOutPtr.second);
1489
1490    return RValue::get(Carry);
1491  }
1492  case Builtin::BI__builtin_addressof:
1493    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
1494  case Builtin::BI__noop:
1495    return RValue::get(0);
1496  }
1497
1498  // If this is an alias for a lib function (e.g. __builtin_sin), emit
1499  // the call using the normal call path, but using the unmangled
1500  // version of the function name.
1501  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
1502    return emitLibraryCall(*this, FD, E,
1503                           CGM.getBuiltinLibFunction(FD, BuiltinID));
1504
1505  // If this is a predefined lib function (e.g. malloc), emit the call
1506  // using exactly the normal call path.
1507  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
1508    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
1509
1510  // See if we have a target-specific intrinsic.
1511  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
1512  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
1513  if (const char *Prefix =
1514      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
1515    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
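  // For instance, on x86 the prefix is "x86", so a GCC-style builtin such as
  // __builtin_ia32_pause is matched by name to the llvm.x86.sse2.pause
  // intrinsic. (Intrinsic chosen purely for illustration.)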
1516
1517  if (IntrinsicID != Intrinsic::not_intrinsic) {
1518    SmallVector<Value*, 16> Args;
1519
1520    // Find out if any arguments are required to be integer constant
1521    // expressions.
1522    unsigned ICEArguments = 0;
1523    ASTContext::GetBuiltinTypeError Error;
1524    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
1525    assert(Error == ASTContext::GE_None && "Should not codegen an error");
1526
1527    Function *F = CGM.getIntrinsic(IntrinsicID);
1528    llvm::FunctionType *FTy = F->getFunctionType();
1529
1530    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
1531      Value *ArgValue;
1532      // If this is a normal argument, just emit it as a scalar.
1533      if ((ICEArguments & (1 << i)) == 0) {
1534        ArgValue = EmitScalarExpr(E->getArg(i));
1535      } else {
1536        // If this is required to be a constant, constant fold it so that we
1537        // know that the generated intrinsic gets a ConstantInt.
1538        llvm::APSInt Result;
1539        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
1540        assert(IsConst && "Constant arg isn't actually constant?");
1541        (void)IsConst;
1542        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
1543      }
1544
1545      // If the intrinsic arg type is different from the builtin arg type,
1546      // we need to do a bit cast.
1547      llvm::Type *PTy = FTy->getParamType(i);
1548      if (PTy != ArgValue->getType()) {
1549        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
1550               "Must be able to losslessly bit cast to param");
1551        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
1552      }
1553
1554      Args.push_back(ArgValue);
1555    }
1556
1557    Value *V = Builder.CreateCall(F, Args);
1558    QualType BuiltinRetType = E->getType();
1559
1560    llvm::Type *RetTy = VoidTy;
1561    if (!BuiltinRetType->isVoidType())
1562      RetTy = ConvertType(BuiltinRetType);
1563
1564    if (RetTy != V->getType()) {
1565      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
1566             "Must be able to losslessly bit cast result type");
1567      V = Builder.CreateBitCast(V, RetTy);
1568    }
1569
1570    return RValue::get(V);
1571  }
1572
1573  // See if we have a target-specific builtin that needs to be lowered.
1574  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
1575    return RValue::get(V);
1576
1577  ErrorUnsupported(E, "builtin function");
1578
1579  // Unknown builtin: reported as unsupported above; just return undef.
1580  return GetUndefRValue(E->getType());
1581}
1582
1583Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
1584                                              const CallExpr *E) {
1585  switch (getTarget().getTriple().getArch()) {
1586  case llvm::Triple::aarch64:
1587    return EmitAArch64BuiltinExpr(BuiltinID, E);
1588  case llvm::Triple::arm:
1589  case llvm::Triple::thumb:
1590    return EmitARMBuiltinExpr(BuiltinID, E);
1591  case llvm::Triple::x86:
1592  case llvm::Triple::x86_64:
1593    return EmitX86BuiltinExpr(BuiltinID, E);
1594  case llvm::Triple::ppc:
1595  case llvm::Triple::ppc64:
1596    return EmitPPCBuiltinExpr(BuiltinID, E);
1597  default:
1598    return 0;
1599  }
1600}
1601
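// A sketch of the type mapping done below: e.g. NeonTypeFlags::Int32 yields
// <2 x i32> for a 64-bit D register and <4 x i32> when the quad flag is set
// (a Q register); Float16 is modeled as a vector of i16, since codegen does
// not use a native half type here.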
1602static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
1603                                     NeonTypeFlags TypeFlags) {
1604  int IsQuad = TypeFlags.isQuad();
1605  switch (TypeFlags.getEltType()) {
1606  case NeonTypeFlags::Int8:
1607  case NeonTypeFlags::Poly8:
1608    return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
1609  case NeonTypeFlags::Int16:
1610  case NeonTypeFlags::Poly16:
1611  case NeonTypeFlags::Float16:
1612    return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
1613  case NeonTypeFlags::Int32:
1614    return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
1615  case NeonTypeFlags::Int64:
1616    return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
1617  case NeonTypeFlags::Float32:
1618    return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
1619  }
1620  llvm_unreachable("Invalid NeonTypeFlags element type!");
1621}
1622
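// E.g. splatting lane 0 of a <4 x i32> value emits a shufflevector of V with
// itself using the constant mask <0, 0, 0, 0>.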
1623Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
1624  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
1625  Value* SV = llvm::ConstantVector::getSplat(nElts, C);
1626  return Builder.CreateShuffleVector(V, V, SV, "lane");
1627}
1628
1629Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
1630                                     const char *name,
1631                                     unsigned shift, bool rightshift) {
1632  unsigned j = 0;
1633  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
1634       ai != ae; ++ai, ++j)
1635    if (shift > 0 && shift == j)
1636      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
1637    else
1638      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
1639
1640  return Builder.CreateCall(F, Ops, name);
1641}
1642
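// Builds the splatted constant vector of shift amounts for the vshift family
// of intrinsics; negating the amount (the 'neg' case) is how those intrinsics
// encode right shifts.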
1643Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
1644                                            bool neg) {
1645  int SV = cast<ConstantInt>(V)->getSExtValue();
1646
1647  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1648  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
1649  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
1650}
1651
1652/// EmitPointerWithAlignment - Given an expression with a pointer type, emit
1653/// the pointer value together with the alignment of the type it points to,
1654/// skipping over implicit casts.
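/// For example, for '&x' where x is an int, this returns the address of x
/// together with the alignment recorded on the lvalue (typically 4 for int);
/// a plain pointer expression falls back to the natural alignment of the
/// pointee type.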
1655std::pair<llvm::Value*, unsigned>
1656CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
1657  assert(Addr->getType()->isPointerType());
1658  Addr = Addr->IgnoreParens();
1659  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
1660    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
1661        ICE->getSubExpr()->getType()->isPointerType()) {
1662      std::pair<llvm::Value*, unsigned> Ptr =
1663          EmitPointerWithAlignment(ICE->getSubExpr());
1664      Ptr.first = Builder.CreateBitCast(Ptr.first,
1665                                        ConvertType(Addr->getType()));
1666      return Ptr;
1667    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1668      LValue LV = EmitLValue(ICE->getSubExpr());
1669      unsigned Align = LV.getAlignment().getQuantity();
1670      if (!Align) {
1671        // FIXME: Once LValues are fixed to always set alignment,
1672        // zap this code.
1673        QualType PtTy = ICE->getSubExpr()->getType();
1674        if (!PtTy->isIncompleteType())
1675          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1676        else
1677          Align = 1;
1678      }
1679      return std::make_pair(LV.getAddress(), Align);
1680    }
1681  }
1682  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
1683    if (UO->getOpcode() == UO_AddrOf) {
1684      LValue LV = EmitLValue(UO->getSubExpr());
1685      unsigned Align = LV.getAlignment().getQuantity();
1686      if (!Align) {
1687        // FIXME: Once LValues are fixed to always set alignment,
1688        // zap this code.
1689        QualType PtTy = UO->getSubExpr()->getType();
1690        if (!PtTy->isIncompleteType())
1691          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1692        else
1693          Align = 1;
1694      }
1695      return std::make_pair(LV.getAddress(), Align);
1696    }
1697  }
1698
1699  unsigned Align = 1;
1700  QualType PtTy = Addr->getType()->getPointeeType();
1701  if (!PtTy->isIncompleteType())
1702    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1703
1704  return std::make_pair(EmitScalarExpr(Addr), Align);
1705}
1706
1707Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
1708                                               const CallExpr *E) {
1709  if (BuiltinID == AArch64::BI__clear_cache) {
1710    assert(E->getNumArgs() == 2 &&
1711           "Variadic __clear_cache slipped through on AArch64");
1712
1713    const FunctionDecl *FD = E->getDirectCallee();
1714    SmallVector<Value *, 2> Ops;
1715    for (unsigned i = 0; i < E->getNumArgs(); i++)
1716      Ops.push_back(EmitScalarExpr(E->getArg(i)));
1717    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
1718    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
1719    StringRef Name = FD->getName();
1720    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
1721  }
1722
1723  return 0;
1724}
1725
1726Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
1727                                           const CallExpr *E) {
1728  if (BuiltinID == ARM::BI__clear_cache) {
1729    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
1730    const FunctionDecl *FD = E->getDirectCallee();
1731    SmallVector<Value*, 2> Ops;
1732    for (unsigned i = 0; i < 2; i++)
1733      Ops.push_back(EmitScalarExpr(E->getArg(i)));
1734    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
1735    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
1736    StringRef Name = FD->getName();
1737    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
1738  }
1739
1740  if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
1741      (BuiltinID == ARM::BI__builtin_arm_ldrex &&
1742       getContext().getTypeSize(E->getType()) == 64)) {
1743    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
1744
1745    Value *LdPtr = EmitScalarExpr(E->getArg(0));
1746    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
1747                                    "ldrexd");
1748
1749    Value *Val0 = Builder.CreateExtractValue(Val, 1);
1750    Value *Val1 = Builder.CreateExtractValue(Val, 0);
1751    Val0 = Builder.CreateZExt(Val0, Int64Ty);
1752    Val1 = Builder.CreateZExt(Val1, Int64Ty);
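    // Combine the two halves; extractvalue index 1 is the high word, so this
    // computes (hi << 32) | lo.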
1753
1754    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
1755    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
1756    Val = Builder.CreateOr(Val, Val1);
1757    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
1758  }
1759
1760  if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
1761    Value *LoadAddr = EmitScalarExpr(E->getArg(0));
1762
1763    QualType Ty = E->getType();
1764    llvm::Type *RealResTy = ConvertType(Ty);
1765    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
1766                                                  getContext().getTypeSize(Ty));
1767    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
1768
1769    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
1770    Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
1771
1772    if (RealResTy->isPointerTy())
1773      return Builder.CreateIntToPtr(Val, RealResTy);
1774    else {
1775      Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
1776      return Builder.CreateBitCast(Val, RealResTy);
1777    }
1778  }
1779
1780  if (BuiltinID == ARM::BI__builtin_arm_strexd ||
1781      (BuiltinID == ARM::BI__builtin_arm_strex &&
1782       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
1783    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
1784    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
1785
1786    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
1787    Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
1788                                      One);
1789    Value *Val = EmitScalarExpr(E->getArg(0));
1790    Builder.CreateStore(Val, Tmp);
1791
1792    Value *LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
1793    Val = Builder.CreateLoad(LdPtr);
1794
1795    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
1796    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
1797    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
1798    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
1799  }
1800
1801  if (BuiltinID == ARM::BI__builtin_arm_strex) {
1802    Value *StoreVal = EmitScalarExpr(E->getArg(0));
1803    Value *StoreAddr = EmitScalarExpr(E->getArg(1));
1804
1805    QualType Ty = E->getArg(0)->getType();
1806    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
1807                                                 getContext().getTypeSize(Ty));
1808    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
1809
1810    if (StoreVal->getType()->isPointerTy())
1811      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
1812    else {
1813      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
1814      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
1815    }
1816
1817    Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
1818    return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
1819  }
1820
1821  if (BuiltinID == ARM::BI__builtin_arm_clrex) {
1822    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
1823    return Builder.CreateCall(F);
1824  }
1825
1826  SmallVector<Value*, 4> Ops;
1827  llvm::Value *Align = 0;
1828  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
1829    if (i == 0) {
1830      switch (BuiltinID) {
1831      case ARM::BI__builtin_neon_vld1_v:
1832      case ARM::BI__builtin_neon_vld1q_v:
1833      case ARM::BI__builtin_neon_vld1q_lane_v:
1834      case ARM::BI__builtin_neon_vld1_lane_v:
1835      case ARM::BI__builtin_neon_vld1_dup_v:
1836      case ARM::BI__builtin_neon_vld1q_dup_v:
1837      case ARM::BI__builtin_neon_vst1_v:
1838      case ARM::BI__builtin_neon_vst1q_v:
1839      case ARM::BI__builtin_neon_vst1q_lane_v:
1840      case ARM::BI__builtin_neon_vst1_lane_v:
1841      case ARM::BI__builtin_neon_vst2_v:
1842      case ARM::BI__builtin_neon_vst2q_v:
1843      case ARM::BI__builtin_neon_vst2_lane_v:
1844      case ARM::BI__builtin_neon_vst2q_lane_v:
1845      case ARM::BI__builtin_neon_vst3_v:
1846      case ARM::BI__builtin_neon_vst3q_v:
1847      case ARM::BI__builtin_neon_vst3_lane_v:
1848      case ARM::BI__builtin_neon_vst3q_lane_v:
1849      case ARM::BI__builtin_neon_vst4_v:
1850      case ARM::BI__builtin_neon_vst4q_v:
1851      case ARM::BI__builtin_neon_vst4_lane_v:
1852      case ARM::BI__builtin_neon_vst4q_lane_v:
1853        // Get the alignment for the argument in addition to the value;
1854        // we'll use it later.
1855        std::pair<llvm::Value*, unsigned> Src =
1856            EmitPointerWithAlignment(E->getArg(0));
1857        Ops.push_back(Src.first);
1858        Align = Builder.getInt32(Src.second);
1859        continue;
1860      }
1861    }
1862    if (i == 1) {
1863      switch (BuiltinID) {
1864      case ARM::BI__builtin_neon_vld2_v:
1865      case ARM::BI__builtin_neon_vld2q_v:
1866      case ARM::BI__builtin_neon_vld3_v:
1867      case ARM::BI__builtin_neon_vld3q_v:
1868      case ARM::BI__builtin_neon_vld4_v:
1869      case ARM::BI__builtin_neon_vld4q_v:
1870      case ARM::BI__builtin_neon_vld2_lane_v:
1871      case ARM::BI__builtin_neon_vld2q_lane_v:
1872      case ARM::BI__builtin_neon_vld3_lane_v:
1873      case ARM::BI__builtin_neon_vld3q_lane_v:
1874      case ARM::BI__builtin_neon_vld4_lane_v:
1875      case ARM::BI__builtin_neon_vld4q_lane_v:
1876      case ARM::BI__builtin_neon_vld2_dup_v:
1877      case ARM::BI__builtin_neon_vld3_dup_v:
1878      case ARM::BI__builtin_neon_vld4_dup_v:
1879        // Get the alignment for the argument in addition to the value;
1880        // we'll use it later.
1881        std::pair<llvm::Value*, unsigned> Src =
1882            EmitPointerWithAlignment(E->getArg(1));
1883        Ops.push_back(Src.first);
1884        Align = Builder.getInt32(Src.second);
1885        continue;
1886      }
1887    }
1888    Ops.push_back(EmitScalarExpr(E->getArg(i)));
1889  }
1890
1891  // vget_lane and vset_lane are not overloaded and do not have an extra
1892  // argument that specifies the vector type.
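  // E.g. (a sketch) vget_lane_i32(v, 1) lowers to
  //   extractelement <2 x i32> %v, i32 1
  // and vset_lane_i32(x, v, 1) to the matching insertelement.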
1893  switch (BuiltinID) {
1894  default: break;
1895  case ARM::BI__builtin_neon_vget_lane_i8:
1896  case ARM::BI__builtin_neon_vget_lane_i16:
1897  case ARM::BI__builtin_neon_vget_lane_i32:
1898  case ARM::BI__builtin_neon_vget_lane_i64:
1899  case ARM::BI__builtin_neon_vget_lane_f32:
1900  case ARM::BI__builtin_neon_vgetq_lane_i8:
1901  case ARM::BI__builtin_neon_vgetq_lane_i16:
1902  case ARM::BI__builtin_neon_vgetq_lane_i32:
1903  case ARM::BI__builtin_neon_vgetq_lane_i64:
1904  case ARM::BI__builtin_neon_vgetq_lane_f32:
1905    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
1906                                        "vget_lane");
1907  case ARM::BI__builtin_neon_vset_lane_i8:
1908  case ARM::BI__builtin_neon_vset_lane_i16:
1909  case ARM::BI__builtin_neon_vset_lane_i32:
1910  case ARM::BI__builtin_neon_vset_lane_i64:
1911  case ARM::BI__builtin_neon_vset_lane_f32:
1912  case ARM::BI__builtin_neon_vsetq_lane_i8:
1913  case ARM::BI__builtin_neon_vsetq_lane_i16:
1914  case ARM::BI__builtin_neon_vsetq_lane_i32:
1915  case ARM::BI__builtin_neon_vsetq_lane_i64:
1916  case ARM::BI__builtin_neon_vsetq_lane_f32:
1917    Ops.push_back(EmitScalarExpr(E->getArg(2)));
1918    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
1919  }
1920
1921  // Get the last argument, which specifies the vector type.
1922  llvm::APSInt Result;
1923  const Expr *Arg = E->getArg(E->getNumArgs()-1);
1924  if (!Arg->isIntegerConstantExpr(Result, getContext()))
1925    return 0;
1926
1927  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
1928      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
1929    // Determine the overloaded type of this builtin.
1930    llvm::Type *Ty;
1931    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
1932      Ty = FloatTy;
1933    else
1934      Ty = DoubleTy;
1935
1936    // Determine whether this is an unsigned conversion or not.
1937    bool usgn = Result.getZExtValue() == 1;
1938    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
1939
1940    // Call the appropriate intrinsic.
1941    Function *F = CGM.getIntrinsic(Int, Ty);
1942    return Builder.CreateCall(F, Ops, "vcvtr");
1943  }
1944
1945  // Determine the type of this overloaded NEON intrinsic.
1946  NeonTypeFlags Type(Result.getZExtValue());
1947  bool usgn = Type.isUnsigned();
1948  bool quad = Type.isQuad();
1949  bool rightShift = false;
1950
1951  llvm::VectorType *VTy = GetNeonType(this, Type);
1952  llvm::Type *Ty = VTy;
1953  if (!Ty)
1954    return 0;
1955
1956  unsigned Int;
1957  switch (BuiltinID) {
1958  default: return 0;
1959  case ARM::BI__builtin_neon_vbsl_v:
1960  case ARM::BI__builtin_neon_vbslq_v:
1961    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
1962                        Ops, "vbsl");
1963  case ARM::BI__builtin_neon_vabd_v:
1964  case ARM::BI__builtin_neon_vabdq_v:
1965    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
1966    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
1967  case ARM::BI__builtin_neon_vabs_v:
1968  case ARM::BI__builtin_neon_vabsq_v:
1969    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
1970                        Ops, "vabs");
1971  case ARM::BI__builtin_neon_vaddhn_v:
1972    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
1973                        Ops, "vaddhn");
1974  case ARM::BI__builtin_neon_vcale_v:
1975    std::swap(Ops[0], Ops[1]);
1976  case ARM::BI__builtin_neon_vcage_v: {
1977    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
1978    return EmitNeonCall(F, Ops, "vcage");
1979  }
1980  case ARM::BI__builtin_neon_vcaleq_v:
1981    std::swap(Ops[0], Ops[1]);
1982  case ARM::BI__builtin_neon_vcageq_v: {
1983    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
1984    return EmitNeonCall(F, Ops, "vcage");
1985  }
1986  case ARM::BI__builtin_neon_vcalt_v:
1987    std::swap(Ops[0], Ops[1]);
1988  case ARM::BI__builtin_neon_vcagt_v: {
1989    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
1990    return EmitNeonCall(F, Ops, "vcagt");
1991  }
1992  case ARM::BI__builtin_neon_vcaltq_v:
1993    std::swap(Ops[0], Ops[1]);
1994  case ARM::BI__builtin_neon_vcagtq_v: {
1995    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
1996    return EmitNeonCall(F, Ops, "vcagt");
1997  }
1998  case ARM::BI__builtin_neon_vcls_v:
1999  case ARM::BI__builtin_neon_vclsq_v: {
2000    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
2001    return EmitNeonCall(F, Ops, "vcls");
2002  }
2003  case ARM::BI__builtin_neon_vclz_v:
2004  case ARM::BI__builtin_neon_vclzq_v: {
2005    // Generate target-independent intrinsic; also need to add second argument
2006    // for whether or not clz of zero is undefined; on ARM it isn't.
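    // E.g. for <4 x i32> this emits
    //   call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %v, i1 false)
    // because clz of zero is well-defined on ARM.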
2007    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
2008    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
2009    return EmitNeonCall(F, Ops, "vclz");
2010  }
2011  case ARM::BI__builtin_neon_vcnt_v:
2012  case ARM::BI__builtin_neon_vcntq_v: {
2013    // Generate the target-independent ctpop intrinsic.
2014    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
2015    return EmitNeonCall(F, Ops, "vctpop");
2016  }
2017  case ARM::BI__builtin_neon_vcvt_f16_v: {
2018    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
2019           "unexpected vcvt_f16_v builtin");
2020    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
2021    return EmitNeonCall(F, Ops, "vcvt");
2022  }
2023  case ARM::BI__builtin_neon_vcvt_f32_f16: {
2024    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
2025           "unexpected vcvt_f32_f16 builtin");
2026    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
2027    return EmitNeonCall(F, Ops, "vcvt");
2028  }
2029  case ARM::BI__builtin_neon_vcvt_f32_v:
2030  case ARM::BI__builtin_neon_vcvtq_f32_v:
2031    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2032    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
2033    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
2034                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
2035  case ARM::BI__builtin_neon_vcvt_s32_v:
2036  case ARM::BI__builtin_neon_vcvt_u32_v:
2037  case ARM::BI__builtin_neon_vcvtq_s32_v:
2038  case ARM::BI__builtin_neon_vcvtq_u32_v: {
2039    llvm::Type *FloatTy =
2040      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
2041    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
2042    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
2043                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
2044  }
2045  case ARM::BI__builtin_neon_vcvt_n_f32_v:
2046  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
2047    llvm::Type *FloatTy =
2048      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
2049    llvm::Type *Tys[2] = { FloatTy, Ty };
2050    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
2051               : Intrinsic::arm_neon_vcvtfxs2fp;
2052    Function *F = CGM.getIntrinsic(Int, Tys);
2053    return EmitNeonCall(F, Ops, "vcvt_n");
2054  }
2055  case ARM::BI__builtin_neon_vcvt_n_s32_v:
2056  case ARM::BI__builtin_neon_vcvt_n_u32_v:
2057  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
2058  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
2059    llvm::Type *FloatTy =
2060      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
2061    llvm::Type *Tys[2] = { Ty, FloatTy };
2062    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
2063               : Intrinsic::arm_neon_vcvtfp2fxs;
2064    Function *F = CGM.getIntrinsic(Int, Tys);
2065    return EmitNeonCall(F, Ops, "vcvt_n");
2066  }
2067  case ARM::BI__builtin_neon_vext_v:
2068  case ARM::BI__builtin_neon_vextq_v: {
2069    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
2070    SmallVector<Constant*, 16> Indices;
2071    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
2072      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
2073
2074    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2075    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2076    Value *SV = llvm::ConstantVector::get(Indices);
2077    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
2078  }
2079  case ARM::BI__builtin_neon_vhadd_v:
2080  case ARM::BI__builtin_neon_vhaddq_v:
2081    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
2082    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
2083  case ARM::BI__builtin_neon_vhsub_v:
2084  case ARM::BI__builtin_neon_vhsubq_v:
2085    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
2086    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
2087  case ARM::BI__builtin_neon_vld1_v:
2088  case ARM::BI__builtin_neon_vld1q_v:
2089    Ops.push_back(Align);
2090    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
2091                        Ops, "vld1");
2092  case ARM::BI__builtin_neon_vld1q_lane_v:
2093    // Handle 64-bit integer elements as a special case.  Use shuffles of
2094    // one-element vectors to avoid poor code for i64 in the backend.
2095    if (VTy->getElementType()->isIntegerTy(64)) {
2096      // Extract the other lane.
2097      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2098      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
2099      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
2100      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
2101      // Load the value as a one-element vector.
2102      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
2103      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
2104      Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
2105      // Combine them.
2106      SmallVector<Constant*, 2> Indices;
2107      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
2108      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
2109      SV = llvm::ConstantVector::get(Indices);
2110      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
2111    }
2112    // fall through
2113  case ARM::BI__builtin_neon_vld1_lane_v: {
2114    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2115    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
2116    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2117    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
2118    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
2119    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
2120  }
2121  case ARM::BI__builtin_neon_vld1_dup_v:
2122  case ARM::BI__builtin_neon_vld1q_dup_v: {
2123    Value *V = UndefValue::get(Ty);
2124    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
2125    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2126    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
2127    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
2128    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
2129    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
2130    return EmitNeonSplat(Ops[0], CI);
2131  }
2132  case ARM::BI__builtin_neon_vld2_v:
2133  case ARM::BI__builtin_neon_vld2q_v: {
2134    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
2135    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
2136    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2137    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2138    return Builder.CreateStore(Ops[1], Ops[0]);
2139  }
2140  case ARM::BI__builtin_neon_vld3_v:
2141  case ARM::BI__builtin_neon_vld3q_v: {
2142    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
2143    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
2144    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2145    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2146    return Builder.CreateStore(Ops[1], Ops[0]);
2147  }
2148  case ARM::BI__builtin_neon_vld4_v:
2149  case ARM::BI__builtin_neon_vld4q_v: {
2150    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
2151    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
2152    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2153    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2154    return Builder.CreateStore(Ops[1], Ops[0]);
2155  }
2156  case ARM::BI__builtin_neon_vld2_lane_v:
2157  case ARM::BI__builtin_neon_vld2q_lane_v: {
2158    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
2159    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2160    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
2161    Ops.push_back(Align);
2162    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
2163    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2164    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2165    return Builder.CreateStore(Ops[1], Ops[0]);
2166  }
2167  case ARM::BI__builtin_neon_vld3_lane_v:
2168  case ARM::BI__builtin_neon_vld3q_lane_v: {
2169    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
2170    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2171    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
2172    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
2173    Ops.push_back(Align);
2174    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
2175    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2176    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2177    return Builder.CreateStore(Ops[1], Ops[0]);
2178  }
2179  case ARM::BI__builtin_neon_vld4_lane_v:
2180  case ARM::BI__builtin_neon_vld4q_lane_v: {
2181    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
2182    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2183    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
2184    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
2185    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
2186    Ops.push_back(Align);
2187    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
2188    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2189    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2190    return Builder.CreateStore(Ops[1], Ops[0]);
2191  }
2192  case ARM::BI__builtin_neon_vld2_dup_v:
2193  case ARM::BI__builtin_neon_vld3_dup_v:
2194  case ARM::BI__builtin_neon_vld4_dup_v: {
2195    // Handle 64-bit elements as a special-case.  There is no "dup" needed.
2196    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
2197      switch (BuiltinID) {
2198      case ARM::BI__builtin_neon_vld2_dup_v:
2199        Int = Intrinsic::arm_neon_vld2;
2200        break;
2201      case ARM::BI__builtin_neon_vld3_dup_v:
2202        Int = Intrinsic::arm_neon_vld3;
2203        break;
2204      case ARM::BI__builtin_neon_vld4_dup_v:
2205        Int = Intrinsic::arm_neon_vld4;
2206        break;
2207      default: llvm_unreachable("unknown vld_dup intrinsic?");
2208      }
2209      Function *F = CGM.getIntrinsic(Int, Ty);
2210      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
2211      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2212      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2213      return Builder.CreateStore(Ops[1], Ops[0]);
2214    }
2215    switch (BuiltinID) {
2216    case ARM::BI__builtin_neon_vld2_dup_v:
2217      Int = Intrinsic::arm_neon_vld2lane;
2218      break;
2219    case ARM::BI__builtin_neon_vld3_dup_v:
2220      Int = Intrinsic::arm_neon_vld3lane;
2221      break;
2222    case ARM::BI__builtin_neon_vld4_dup_v:
2223      Int = Intrinsic::arm_neon_vld4lane;
2224      break;
2225    default: llvm_unreachable("unknown vld_dup intrinsic?");
2226    }
2227    Function *F = CGM.getIntrinsic(Int, Ty);
2228    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
2229
2230    SmallVector<Value*, 6> Args;
2231    Args.push_back(Ops[1]);
2232    Args.append(STy->getNumElements(), UndefValue::get(Ty));
2233
2234    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
2235    Args.push_back(CI);
2236    Args.push_back(Align);
2237
2238    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
2239    // Splat lane 0 to all elements in each vector of the result.
2240    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2241      Value *Val = Builder.CreateExtractValue(Ops[1], i);
2242      Value *Elt = Builder.CreateBitCast(Val, Ty);
2243      Elt = EmitNeonSplat(Elt, CI);
2244      Elt = Builder.CreateBitCast(Elt, Val->getType());
2245      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
2246    }
2247    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2248    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2249    return Builder.CreateStore(Ops[1], Ops[0]);
2250  }
2251  case ARM::BI__builtin_neon_vmax_v:
2252  case ARM::BI__builtin_neon_vmaxq_v:
2253    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
2254    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
2255  case ARM::BI__builtin_neon_vmin_v:
2256  case ARM::BI__builtin_neon_vminq_v:
2257    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
2258    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
2259  case ARM::BI__builtin_neon_vmovl_v: {
2260    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
2261    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
2262    if (usgn)
2263      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
2264    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
2265  }
2266  case ARM::BI__builtin_neon_vmovn_v: {
2267    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2268    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
2269    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
2270  }
2271  case ARM::BI__builtin_neon_vmul_v:
2272  case ARM::BI__builtin_neon_vmulq_v:
2273    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
2274    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
2275                        Ops, "vmul");
2276  case ARM::BI__builtin_neon_vmull_v:
2277    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
2278    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
2279    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
2280  case ARM::BI__builtin_neon_vfma_v:
2281  case ARM::BI__builtin_neon_vfmaq_v: {
2282    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
2283    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2284    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2285    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2286
2287    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
2288    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
2289  }
2290  case ARM::BI__builtin_neon_vpadal_v:
2291  case ARM::BI__builtin_neon_vpadalq_v: {
2292    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
2293    // The source operand type has twice as many elements of half the size.
2294    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
2295    llvm::Type *EltTy =
2296      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
2297    llvm::Type *NarrowTy =
2298      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
2299    llvm::Type *Tys[2] = { Ty, NarrowTy };
2300    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
2301  }
2302  case ARM::BI__builtin_neon_vpadd_v:
2303    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
2304                        Ops, "vpadd");
2305  case ARM::BI__builtin_neon_vpaddl_v:
2306  case ARM::BI__builtin_neon_vpaddlq_v: {
2307    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
2308    // The source operand type has twice as many elements of half the size.
2309    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
2310    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
2311    llvm::Type *NarrowTy =
2312      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
2313    llvm::Type *Tys[2] = { Ty, NarrowTy };
2314    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
2315  }
2316  case ARM::BI__builtin_neon_vpmax_v:
2317    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
2318    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
2319  case ARM::BI__builtin_neon_vpmin_v:
2320    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
2321    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
2322  case ARM::BI__builtin_neon_vqabs_v:
2323  case ARM::BI__builtin_neon_vqabsq_v:
2324    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
2325                        Ops, "vqabs");
2326  case ARM::BI__builtin_neon_vqadd_v:
2327  case ARM::BI__builtin_neon_vqaddq_v:
2328    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
2329    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
2330  case ARM::BI__builtin_neon_vqdmlal_v:
2331    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
2332                        Ops, "vqdmlal");
2333  case ARM::BI__builtin_neon_vqdmlsl_v:
2334    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
2335                        Ops, "vqdmlsl");
2336  case ARM::BI__builtin_neon_vqdmulh_v:
2337  case ARM::BI__builtin_neon_vqdmulhq_v:
2338    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
2339                        Ops, "vqdmulh");
2340  case ARM::BI__builtin_neon_vqdmull_v:
2341    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
2342                        Ops, "vqdmull");
2343  case ARM::BI__builtin_neon_vqmovn_v:
2344    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
2345    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
2346  case ARM::BI__builtin_neon_vqmovun_v:
2347    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
2348                        Ops, "vqmovun");
2349  case ARM::BI__builtin_neon_vqneg_v:
2350  case ARM::BI__builtin_neon_vqnegq_v:
2351    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
2352                        Ops, "vqneg");
2353  case ARM::BI__builtin_neon_vqrdmulh_v:
2354  case ARM::BI__builtin_neon_vqrdmulhq_v:
2355    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
2356                        Ops, "vqrdmulh");
2357  case ARM::BI__builtin_neon_vqrshl_v:
2358  case ARM::BI__builtin_neon_vqrshlq_v:
2359    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
2360    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
2361  case ARM::BI__builtin_neon_vqrshrn_n_v:
2362    Int =
2363      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
2364    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
2365                        1, true);
2366  case ARM::BI__builtin_neon_vqrshrun_n_v:
2367    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
2368                        Ops, "vqrshrun_n", 1, true);
2369  case ARM::BI__builtin_neon_vqshl_v:
2370  case ARM::BI__builtin_neon_vqshlq_v:
2371    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
2372    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
2373  case ARM::BI__builtin_neon_vqshl_n_v:
2374  case ARM::BI__builtin_neon_vqshlq_n_v:
2375    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
2376    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
2377                        1, false);
2378  case ARM::BI__builtin_neon_vqshlu_n_v:
2379  case ARM::BI__builtin_neon_vqshluq_n_v:
2380    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
2381                        Ops, "vqshlu", 1, false);
2382  case ARM::BI__builtin_neon_vqshrn_n_v:
2383    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
2384    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
2385                        1, true);
2386  case ARM::BI__builtin_neon_vqshrun_n_v:
2387    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
2388                        Ops, "vqshrun_n", 1, true);
2389  case ARM::BI__builtin_neon_vqsub_v:
2390  case ARM::BI__builtin_neon_vqsubq_v:
2391    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
2392    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
2393  case ARM::BI__builtin_neon_vraddhn_v:
2394    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
2395                        Ops, "vraddhn");
2396  case ARM::BI__builtin_neon_vrecpe_v:
2397  case ARM::BI__builtin_neon_vrecpeq_v:
2398    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
2399                        Ops, "vrecpe");
2400  case ARM::BI__builtin_neon_vrecps_v:
2401  case ARM::BI__builtin_neon_vrecpsq_v:
2402    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
2403                        Ops, "vrecps");
2404  case ARM::BI__builtin_neon_vrhadd_v:
2405  case ARM::BI__builtin_neon_vrhaddq_v:
2406    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
2407    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
2408  case ARM::BI__builtin_neon_vrshl_v:
2409  case ARM::BI__builtin_neon_vrshlq_v:
2410    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
2411    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
2412  case ARM::BI__builtin_neon_vrshrn_n_v:
2413    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
2414                        Ops, "vrshrn_n", 1, true);
2415  case ARM::BI__builtin_neon_vrshr_n_v:
2416  case ARM::BI__builtin_neon_vrshrq_n_v:
2417    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
2418    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
2419  case ARM::BI__builtin_neon_vrsqrte_v:
2420  case ARM::BI__builtin_neon_vrsqrteq_v:
2421    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
2422                        Ops, "vrsqrte");
2423  case ARM::BI__builtin_neon_vrsqrts_v:
2424  case ARM::BI__builtin_neon_vrsqrtsq_v:
2425    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
2426                        Ops, "vrsqrts");
2427  case ARM::BI__builtin_neon_vrsra_n_v:
2428  case ARM::BI__builtin_neon_vrsraq_n_v:
2429    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2430    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2431    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
2432    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
2433    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
2434    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
2435  case ARM::BI__builtin_neon_vrsubhn_v:
2436    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
2437                        Ops, "vrsubhn");
2438  case ARM::BI__builtin_neon_vshl_v:
2439  case ARM::BI__builtin_neon_vshlq_v:
2440    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
2441    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
2442  case ARM::BI__builtin_neon_vshll_n_v:
2443    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
2444    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
2445  case ARM::BI__builtin_neon_vshl_n_v:
2446  case ARM::BI__builtin_neon_vshlq_n_v:
2447    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
2448    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
2449                             "vshl_n");
2450  case ARM::BI__builtin_neon_vshrn_n_v:
2451    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
2452                        Ops, "vshrn_n", 1, true);
2453  case ARM::BI__builtin_neon_vshr_n_v:
2454  case ARM::BI__builtin_neon_vshrq_n_v:
2455    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2456    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
2457    if (usgn)
2458      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
2459    else
2460      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
2461  case ARM::BI__builtin_neon_vsri_n_v:
2462  case ARM::BI__builtin_neon_vsriq_n_v:
2463    rightShift = true;
2464  case ARM::BI__builtin_neon_vsli_n_v:
2465  case ARM::BI__builtin_neon_vsliq_n_v:
2466    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
2467    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
2468                        Ops, "vsli_n");
2469  case ARM::BI__builtin_neon_vsra_n_v:
2470  case ARM::BI__builtin_neon_vsraq_n_v:
2471    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2472    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2473    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
2474    if (usgn)
2475      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
2476    else
2477      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
2478    return Builder.CreateAdd(Ops[0], Ops[1]);
2479  case ARM::BI__builtin_neon_vst1_v:
2480  case ARM::BI__builtin_neon_vst1q_v:
2481    Ops.push_back(Align);
2482    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
2483                        Ops, "");
2484  case ARM::BI__builtin_neon_vst1q_lane_v:
2485    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
2486    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
                        Ops, "vsubhn");
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
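    // vtst: test each lane of (a & b) against zero and sign-extend the i1
    // result so that matching lanes become all-ones masks.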
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
  case ARM::BI__builtin_neon_vtrn_v:
  case ARM::BI__builtin_neon_vtrnq_v: {
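    // vtrn returns two vectors through memory; Ops[0] holds the address of
    // the result aggregate. For a 4-element vector the two transpose masks
    // built below would be <0,4,2,6> and <1,5,3,7> (illustrative).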
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(Builder.getInt32(i+vi));
        Indices.push_back(Builder.getInt32(i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vuzp_v:
  case ARM::BI__builtin_neon_vuzpq_v: {
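    // vuzp de-interleaves; for a 4-element vector the two masks built below
    // would be <0,2,4,6> and <1,3,5,7> (illustrative).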
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vzip_v:
  case ARM::BI__builtin_neon_vzipq_v: {
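    // vzip interleaves; for a 4-element vector the two masks built below
    // would be <0,4,1,5> and <2,6,3,7> (illustrative).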
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
}

llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_ldmxcsr: {
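    // The SSE ldmxcsr intrinsic takes a pointer, so spill the i32 argument
    // to a stack slot and hand it the slot's address.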
    llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
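    // Conversely, stmxcsr writes MXCSR through a pointer; give it a stack
    // slot and load the result back out.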
    llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // Cast the value to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors 8 bytes or less,
    // emit a shuffle instruction.
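    // In the shuffle, indices 0-7 select from Ops[1] and 8-15 from Ops[0];
    // e.g. shiftVal == 4 yields <4..11>: the high half of Ops[1] followed by
    // the low half of Ops[0] (illustrative).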
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // Emit the logical right shift of the destination.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors 16 bytes or less,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // Emit the logical right shift of the destination.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors 16 bytes or less,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so build the mask one
      // lane at a time.
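      // e.g. shiftVal == 4 gives lane 0 indices 4-15,32-35 and lane 1 indices
      // 20-31,48-51 (elements 0-31 select from Ops[1], 32-63 from Ops[0];
      // illustrative).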
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value *SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // Emit the logical right shift of the destination.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti: {
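    // Emit an ordinary store tagged with !nontemporal metadata; the backend
    // lowers such stores to streaming stores (movntps and friends).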
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
    SI->setAlignment(16);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

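    // The rdrand/rdseed intrinsics return a {value, flag} pair: store the
    // random value through the pointer argument and return the success flag.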
    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
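    // The builtin takes (offset, pointer); form the effective address with a
    // byte-level GEP and drop the offset, since the intrinsic expects a
    // single pointer operand.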
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
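    // Likewise for the stores: the arguments are (value, offset, pointer),
    // so fold offset and pointer into a single address operand.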
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}