CGBuiltin.cpp revision bc5de89da7dbd930c339757b1d01cb926be768fc
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
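// For illustration: EmitToInt/EmitFromInt exist because the atomicrmw and
// cmpxchg instructions only operate on integers, while the __sync_* builtins
// also accept pointer operands. For a pointer-typed T the helpers round-trip
// the value through the address-sized integer, roughly:
//   %v   = ptrtoint i8* %val to i64    ; EmitToInt
//   ...atomic operation on i64...
//   %r   = inttoptr i64 %old to i8*    ; EmitFromInt
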
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
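// For illustration: EmitBinaryAtomicPost returns the *new* value rather than
// the old one, so a call such as __sync_add_and_fetch(&i, v) on an int lowers
// roughly to:
//   %old = atomicrmw add i32* %i, i32 %v seq_cst
//   %new = add i32 %old, %v    ; redo the operation to get the post value
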
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
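  // For illustration: the va_list cases above reduce to a single intrinsic
  // call once the va_list lvalue has been cast to i8*; __builtin_va_start,
  // e.g., lowers roughly to:
  //   %ap = bitcast i8** %arglist to i8*
  //   call void @llvm.va_start(i8* %ap)
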
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
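  // For illustration: the i1 flag passed to @llvm.cttz/@llvm.ctlz above states
  // whether a zero input is undefined; on a target where isCLZForZeroUndef()
  // is true, __builtin_ctz(x) lowers roughly to:
  //   %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
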
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
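  // For illustration: the object_size case below defers the computation to
  // the optimizer via @llvm.objectsize, whose i1 operand selects the minimum
  // (types 2 and 3) or maximum (types 0 and 1) remaining size; type 0 gives
  // roughly:
  //   %r = call i64 @llvm.objectsize.i64(i8* %p, i1 false)
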
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
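  // Note: the ordered predicates used above (ogt, oge, olt, ole, one) yield
  // false when either operand is NaN, which is exactly the quiet behavior the
  // C99 comparison macros require; only __builtin_isunordered uses an
  // unordered predicate and so yields true on NaN.
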
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()), "isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()), "isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }
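  // Note on the *_chk cases in this switch: when the size arguments are not
  // compile-time constants, or the object-size check cannot be proven, the
  // 'break' falls out of the switch to the library-call paths at the bottom
  // of EmitBuiltinExpr, so e.g. __builtin___memcpy_chk is then emitted as a
  // call to the runtime's __memcpy_chk.
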
  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
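  // For illustration: the memcpy/memmove/memset cases above emit the
  // size-parameterized LLVM intrinsics; with a 64-bit size_t, memset(p, c, n)
  // becomes roughly:
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 %c, i64 %n,
  //                                   i32 <align>, i1 false)
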
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
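  // For illustration: the return/frame address cases above cast the depth
  // argument to i32 and forward it to the matching intrinsic, so
  // __builtin_return_address(0) lowers roughly to:
  //   %r = call i8* @llvm.returnaddress(i32 0)
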
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
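  // Note: the unsized __sync_* forms above never reach this code because Sema
  // rewrites them to the sized variants that follow; e.g.
  // __sync_fetch_and_add(&i, 5) on a 32-bit int becomes
  // __sync_fetch_and_add_4 and lowers roughly to:
  //   %old = atomicrmw add i32* %i, i32 5 seq_cst
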
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
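  // Note: at this point cmpxchg returns only the value loaded from memory,
  // not a {value, success} pair, so the bool variant below has to compare
  // the returned value against the expected operand itself.
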
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = 0;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }
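  // Note on the fence lowering below: memory_order_consume is strengthened to
  // an acquire fence, and a constant relaxed (or invalid) order emits no fence
  // at all; e.g. __c11_atomic_thread_fence(memory_order_acquire) becomes
  // roughly:
  //   fence acquire
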
  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
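  // Note: the sqrt* and pow* rewrites above fire only when the declaration
  // carries the 'const' attribute (so errno handling can be ignored); sqrt
  // additionally requires unsafe- or no-NaNs-math because @llvm.sqrt of a
  // negative input is not required to match the library function's behavior.
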
  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
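  // Note: the signbit lowering above goes through an integer bitcast rather
  // than a floating-point comparison so that negative zero is handled
  // correctly: __builtin_signbit(-0.0) must return nonzero even though
  // -0.0 == 0.0 compares equal.
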
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    std::pair<llvm::Value *, unsigned> SumOutPtr =
      EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown security overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
    SumOutStore->setAlignment(SumOutPtr.second);

    return RValue::get(Carry);
  }
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
  case Builtin::BI__noop:
    return RValue::get(0);
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::aarch64:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}
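
// Illustrative mapping performed by GetNeonType below (for example,
// mirroring the NeonTypeFlags(NeonTypeFlags::Float64, false, false) call
// later in this file): the Int32 element type maps to <2 x i32>, its quad
// form to <4 x i32>, and V1Ty forces a one-element vector such as <1 x i64>
// for the scalar NEON builtins.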
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool V1Ty=false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  Value* SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();

  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}

/// \brief Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
      return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}
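
// Illustrative example: an unsigned right shift of a <2 x i64> vector by 64
// (the full element width) is folded to zeroinitializer above, since emitting
// 'lshr <2 x i64> %v, <i64 64, i64 64>' would be undefined IR.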

/// EmitPointerWithAlignment - Given an expression with a pointer type, find
/// the alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  assert(Addr->getType()->isPointerType());
  Addr = Addr->IgnoreParens();
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
        ICE->getSubExpr()->getType()->isPointerType()) {
      std::pair<llvm::Value*, unsigned> Ptr =
        EmitPointerWithAlignment(ICE->getSubExpr());
      Ptr.first = Builder.CreateBitCast(Ptr.first,
                                        ConvertType(Addr->getType()));
      return Ptr;
    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
      LValue LV = EmitLValue(ICE->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = ICE->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = UO->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }

  unsigned Align = 1;
  QualType PtTy = Addr->getType()->getPointeeType();
  if (!PtTy->isIncompleteType())
    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();

  return std::make_pair(EmitScalarExpr(Addr), Align);
}
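
// Illustrative example: for an argument written as '&value', the UO_AddrOf
// path above reports the alignment recorded on the lvalue for 'value', so a
// 16-byte-aligned object yields an alignment of 16 rather than the
// conservative fallback of 1, and later NEON loads/stores can carry it.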

static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
                                           unsigned BuiltinID,
                                           const CallExpr *E) {
  unsigned int Int = 0;
  // Scalar result generated across vectors
  bool AcrossVec = false;
  // Extend element of one-element vector
  bool ExtendEle = false;
  bool OverloadInt = false;
  bool OverloadCmpInt = false;
  bool OverloadWideInt = false;
  bool OverloadNarrowInt = false;
  const char *s = NULL;

  SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
  }

  // AArch64 scalar builtins are not overloaded; they do not have an extra
  // argument that specifies the vector type, so we need to handle each case.
  switch (BuiltinID) {
  default: break;
  case AArch64::BI__builtin_neon_vdups_lane_f32:
  case AArch64::BI__builtin_neon_vdupd_lane_f64:
  case AArch64::BI__builtin_neon_vdups_laneq_f32:
  case AArch64::BI__builtin_neon_vdupd_laneq_f64: {
    return CGF.Builder.CreateExtractElement(Ops[0], Ops[1], "vdup_lane");
  }
  case AArch64::BI__builtin_neon_vdupb_lane_i8:
  case AArch64::BI__builtin_neon_vduph_lane_i16:
  case AArch64::BI__builtin_neon_vdups_lane_i32:
  case AArch64::BI__builtin_neon_vdupd_lane_i64:
  case AArch64::BI__builtin_neon_vdupb_laneq_i8:
  case AArch64::BI__builtin_neon_vduph_laneq_i16:
  case AArch64::BI__builtin_neon_vdups_laneq_i32:
  case AArch64::BI__builtin_neon_vdupd_laneq_i64: {
    // The backend treats Neon scalar types as v1ix types, so we dup the lane
    // from any vector into a v1ix vector with a shufflevector.
    s = "vdup_lane";
    Value* SV = llvm::ConstantVector::getSplat(1, cast<ConstantInt>(Ops[1]));
    Value *Result = CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], SV, s);
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    // Cast the AArch64 intrinsic's one-element vector result back to the
    // scalar type expected by the builtin.
    return CGF.Builder.CreateBitCast(Result, Ty, s);
  }
  case AArch64::BI__builtin_neon_vqdmlalh_lane_s16:
  case AArch64::BI__builtin_neon_vqdmlalh_laneq_s16:
  case AArch64::BI__builtin_neon_vqdmlals_lane_s32:
  case AArch64::BI__builtin_neon_vqdmlals_laneq_s32:
  case AArch64::BI__builtin_neon_vqdmlslh_lane_s16:
  case AArch64::BI__builtin_neon_vqdmlslh_laneq_s16:
  case AArch64::BI__builtin_neon_vqdmlsls_lane_s32:
  case AArch64::BI__builtin_neon_vqdmlsls_laneq_s32: {
    Int = Intrinsic::arm_neon_vqadds;
    if (BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_lane_s16 ||
        BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 ||
        BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_lane_s32 ||
        BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_laneq_s32) {
      Int = Intrinsic::arm_neon_vqsubs;
    }
    // Create the vqdmull call with b * c[i].
    llvm::Type *Ty = CGF.ConvertType(E->getArg(1)->getType());
    llvm::VectorType *OpVTy = llvm::VectorType::get(Ty, 1);
    Ty = CGF.ConvertType(E->getArg(0)->getType());
    llvm::VectorType *ResVTy = llvm::VectorType::get(Ty, 1);
    Value *F = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, ResVTy);
    Value *V = UndefValue::get(OpVTy);
    llvm::Constant *CI = ConstantInt::get(CGF.Int32Ty, 0);
    SmallVector<Value *, 2> MulOps;
    MulOps.push_back(Ops[1]);
    MulOps.push_back(Ops[2]);
    MulOps[0] = CGF.Builder.CreateInsertElement(V, MulOps[0], CI);
    MulOps[1] = CGF.Builder.CreateExtractElement(MulOps[1], Ops[3], "extract");
    MulOps[1] = CGF.Builder.CreateInsertElement(V, MulOps[1], CI);
    Value *MulRes = CGF.Builder.CreateCall2(F, MulOps[0], MulOps[1]);
    // Create the vqadds/vqsubs call with a +/- the vqdmull result.
    F = CGF.CGM.getIntrinsic(Int, ResVTy);
    SmallVector<Value *, 2> AddOps;
    AddOps.push_back(Ops[0]);
    AddOps.push_back(MulRes);
    V = UndefValue::get(ResVTy);
    AddOps[0] = CGF.Builder.CreateInsertElement(V, AddOps[0], CI);
    Value *AddRes = CGF.Builder.CreateCall2(F, AddOps[0], AddOps[1]);
    return CGF.Builder.CreateBitCast(AddRes, Ty);
  }
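  // Illustrative summary of the block above: vqdmlalh_lane_s16(a, b, v, i)
  // forms <1 x i16> vectors from b and v[i], multiplies them with
  // @llvm.arm.neon.vqdmull to get a <1 x i32>, saturating-adds (or subtracts)
  // a via @llvm.arm.neon.vqadds/@llvm.arm.neon.vqsubs, and bitcasts the
  // one-element result back to the scalar the builtin returns.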
  case AArch64::BI__builtin_neon_vfmas_lane_f32:
  case AArch64::BI__builtin_neon_vfmas_laneq_f32:
  case AArch64::BI__builtin_neon_vfmad_lane_f64:
  case AArch64::BI__builtin_neon_vfmad_laneq_f64: {
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    Value *F = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return CGF.Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  // Scalar Floating-point Multiply Extended
  case AArch64::BI__builtin_neon_vmulxs_f32:
  case AArch64::BI__builtin_neon_vmulxd_f64: {
    Int = Intrinsic::aarch64_neon_vmulx;
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    return CGF.EmitNeonCall(CGF.CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case AArch64::BI__builtin_neon_vmul_n_f64: {
    // v1f64 vmul_n_f64 should be mapped to a Neon scalar mul lane.
    llvm::Type *VTy = GetNeonType(&CGF,
      NeonTypeFlags(NeonTypeFlags::Float64, false, false));
    Ops[0] = CGF.Builder.CreateBitCast(Ops[0], VTy);
    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
    Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], Idx, "extract");
    Value *Result = CGF.Builder.CreateFMul(Ops[0], Ops[1]);
    return CGF.Builder.CreateBitCast(Result, VTy);
  }
  case AArch64::BI__builtin_neon_vget_lane_i8:
  case AArch64::BI__builtin_neon_vget_lane_i16:
  case AArch64::BI__builtin_neon_vget_lane_i32:
  case AArch64::BI__builtin_neon_vget_lane_i64:
  case AArch64::BI__builtin_neon_vget_lane_f32:
  case AArch64::BI__builtin_neon_vget_lane_f64:
  case AArch64::BI__builtin_neon_vgetq_lane_i8:
  case AArch64::BI__builtin_neon_vgetq_lane_i16:
  case AArch64::BI__builtin_neon_vgetq_lane_i32:
  case AArch64::BI__builtin_neon_vgetq_lane_i64:
  case AArch64::BI__builtin_neon_vgetq_lane_f32:
  case AArch64::BI__builtin_neon_vgetq_lane_f64:
    return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vget_lane_i8, E);
  case AArch64::BI__builtin_neon_vset_lane_i8:
  case AArch64::BI__builtin_neon_vset_lane_i16:
  case AArch64::BI__builtin_neon_vset_lane_i32:
  case AArch64::BI__builtin_neon_vset_lane_i64:
  case AArch64::BI__builtin_neon_vset_lane_f32:
  case AArch64::BI__builtin_neon_vset_lane_f64:
  case AArch64::BI__builtin_neon_vsetq_lane_i8:
  case AArch64::BI__builtin_neon_vsetq_lane_i16:
  case AArch64::BI__builtin_neon_vsetq_lane_i32:
  case AArch64::BI__builtin_neon_vsetq_lane_i64:
  case AArch64::BI__builtin_neon_vsetq_lane_f32:
  case AArch64::BI__builtin_neon_vsetq_lane_f64:
    return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vset_lane_i8, E);
  // Crypto
  case AArch64::BI__builtin_neon_vsha1h_u32:
    Int = Intrinsic::arm_neon_sha1h;
    s = "sha1h"; OverloadInt = true; break;
  case AArch64::BI__builtin_neon_vsha1cq_u32:
    Int = Intrinsic::aarch64_neon_sha1c;
    s = "sha1c"; break;
  case AArch64::BI__builtin_neon_vsha1pq_u32:
    Int = Intrinsic::aarch64_neon_sha1p;
    s = "sha1p"; break;
  case AArch64::BI__builtin_neon_vsha1mq_u32:
    Int = Intrinsic::aarch64_neon_sha1m;
    s = "sha1m"; break;
  // Scalar Add
  case AArch64::BI__builtin_neon_vaddd_s64:
    Int = Intrinsic::aarch64_neon_vaddds;
    s = "vaddds"; break;
  case AArch64::BI__builtin_neon_vaddd_u64:
    Int = Intrinsic::aarch64_neon_vadddu;
    s = "vadddu"; break;
  // Scalar Sub
  case AArch64::BI__builtin_neon_vsubd_s64:
    Int = Intrinsic::aarch64_neon_vsubds;
    s = "vsubds"; break;
  case AArch64::BI__builtin_neon_vsubd_u64:
    Int = Intrinsic::aarch64_neon_vsubdu;
    s = "vsubdu"; break;
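  // Note on the common tail of this function: the simple cases above emit a
  // single call to the chosen intrinsic (e.g. @llvm.aarch64.neon.vaddds for
  // vaddd_s64); EmitNeonCall bitcasts each operand to the intrinsic's
  // parameter type, and the result is bitcast back to the builtin's scalar
  // return type.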
  // Scalar Saturating Add
  case AArch64::BI__builtin_neon_vqaddb_s8:
  case AArch64::BI__builtin_neon_vqaddh_s16:
  case AArch64::BI__builtin_neon_vqadds_s32:
  case AArch64::BI__builtin_neon_vqaddd_s64:
    Int = Intrinsic::arm_neon_vqadds;
    s = "vqadds"; OverloadInt = true; break;
  case AArch64::BI__builtin_neon_vqaddb_u8:
  case AArch64::BI__builtin_neon_vqaddh_u16:
  case AArch64::BI__builtin_neon_vqadds_u32:
  case AArch64::BI__builtin_neon_vqaddd_u64:
    Int = Intrinsic::arm_neon_vqaddu;
    s = "vqaddu"; OverloadInt = true; break;
  // Scalar Saturating Sub
  case AArch64::BI__builtin_neon_vqsubb_s8:
  case AArch64::BI__builtin_neon_vqsubh_s16:
  case AArch64::BI__builtin_neon_vqsubs_s32:
  case AArch64::BI__builtin_neon_vqsubd_s64:
    Int = Intrinsic::arm_neon_vqsubs;
    s = "vqsubs"; OverloadInt = true; break;
  case AArch64::BI__builtin_neon_vqsubb_u8:
  case AArch64::BI__builtin_neon_vqsubh_u16:
  case AArch64::BI__builtin_neon_vqsubs_u32:
  case AArch64::BI__builtin_neon_vqsubd_u64:
    Int = Intrinsic::arm_neon_vqsubu;
    s = "vqsubu"; OverloadInt = true; break;
  // Scalar Shift Left
  case AArch64::BI__builtin_neon_vshld_s64:
    Int = Intrinsic::aarch64_neon_vshlds;
    s = "vshlds"; break;
  case AArch64::BI__builtin_neon_vshld_u64:
    Int = Intrinsic::aarch64_neon_vshldu;
    s = "vshldu"; break;
  // Scalar Saturating Shift Left
  case AArch64::BI__builtin_neon_vqshlb_s8:
  case AArch64::BI__builtin_neon_vqshlh_s16:
  case AArch64::BI__builtin_neon_vqshls_s32:
  case AArch64::BI__builtin_neon_vqshld_s64:
    Int = Intrinsic::aarch64_neon_vqshls;
    s = "vqshls"; OverloadInt = true; break;
  case AArch64::BI__builtin_neon_vqshlb_u8:
  case AArch64::BI__builtin_neon_vqshlh_u16:
  case AArch64::BI__builtin_neon_vqshls_u32:
  case AArch64::BI__builtin_neon_vqshld_u64:
    Int = Intrinsic::aarch64_neon_vqshlu;
    s = "vqshlu"; OverloadInt = true; break;
  // Scalar Rounding Shift Left
  case AArch64::BI__builtin_neon_vrshld_s64:
    Int = Intrinsic::aarch64_neon_vrshlds;
    s = "vrshlds"; break;
  case AArch64::BI__builtin_neon_vrshld_u64:
    Int = Intrinsic::aarch64_neon_vrshldu;
    s = "vrshldu"; break;
  // Scalar Saturating Rounding Shift Left
  case AArch64::BI__builtin_neon_vqrshlb_s8:
  case AArch64::BI__builtin_neon_vqrshlh_s16:
  case AArch64::BI__builtin_neon_vqrshls_s32:
  case AArch64::BI__builtin_neon_vqrshld_s64:
    Int = Intrinsic::aarch64_neon_vqrshls;
    s = "vqrshls"; OverloadInt = true; break;
  case AArch64::BI__builtin_neon_vqrshlb_u8:
  case AArch64::BI__builtin_neon_vqrshlh_u16:
  case AArch64::BI__builtin_neon_vqrshls_u32:
  case AArch64::BI__builtin_neon_vqrshld_u64:
    Int = Intrinsic::aarch64_neon_vqrshlu;
    s = "vqrshlu"; OverloadInt = true; break;
  // Scalar Reduce Pairwise Add
  case AArch64::BI__builtin_neon_vpaddd_s64:
    Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
    break;
  case AArch64::BI__builtin_neon_vpadds_f32:
    Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
    break;
  case AArch64::BI__builtin_neon_vpaddd_f64:
    Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
    break;
  // Scalar Reduce Pairwise Floating Point Max
  case AArch64::BI__builtin_neon_vpmaxs_f32:
    Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
    break;
  case AArch64::BI__builtin_neon_vpmaxqd_f64:
    Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
    break;
"vpmaxq"; 2001 break; 2002 // Scalar Reduce Pairwise Floating Point Min 2003 case AArch64::BI__builtin_neon_vpmins_f32: 2004 Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin"; 2005 break; 2006 case AArch64::BI__builtin_neon_vpminqd_f64: 2007 Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq"; 2008 break; 2009 // Scalar Reduce Pairwise Floating Point Maxnm 2010 case AArch64::BI__builtin_neon_vpmaxnms_f32: 2011 Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm"; 2012 break; 2013 case AArch64::BI__builtin_neon_vpmaxnmqd_f64: 2014 Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq"; 2015 break; 2016 // Scalar Reduce Pairwise Floating Point Minnm 2017 case AArch64::BI__builtin_neon_vpminnms_f32: 2018 Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm"; 2019 break; 2020 case AArch64::BI__builtin_neon_vpminnmqd_f64: 2021 Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq"; 2022 break; 2023 // The followings are intrinsics with scalar results generated AcrossVec vectors 2024 case AArch64::BI__builtin_neon_vaddlv_s8: 2025 case AArch64::BI__builtin_neon_vaddlv_s16: 2026 case AArch64::BI__builtin_neon_vaddlvq_s8: 2027 case AArch64::BI__builtin_neon_vaddlvq_s16: 2028 case AArch64::BI__builtin_neon_vaddlvq_s32: 2029 Int = Intrinsic::aarch64_neon_saddlv; 2030 AcrossVec = true; ExtendEle = true; s = "saddlv"; break; 2031 case AArch64::BI__builtin_neon_vaddlv_u8: 2032 case AArch64::BI__builtin_neon_vaddlv_u16: 2033 case AArch64::BI__builtin_neon_vaddlvq_u8: 2034 case AArch64::BI__builtin_neon_vaddlvq_u16: 2035 case AArch64::BI__builtin_neon_vaddlvq_u32: 2036 Int = Intrinsic::aarch64_neon_uaddlv; 2037 AcrossVec = true; ExtendEle = true; s = "uaddlv"; break; 2038 case AArch64::BI__builtin_neon_vmaxv_s8: 2039 case AArch64::BI__builtin_neon_vmaxv_s16: 2040 case AArch64::BI__builtin_neon_vmaxvq_s8: 2041 case AArch64::BI__builtin_neon_vmaxvq_s16: 2042 case AArch64::BI__builtin_neon_vmaxvq_s32: 2043 Int = Intrinsic::aarch64_neon_smaxv; 2044 AcrossVec = true; ExtendEle = false; s = "smaxv"; break; 2045 case AArch64::BI__builtin_neon_vmaxv_u8: 2046 case AArch64::BI__builtin_neon_vmaxv_u16: 2047 case AArch64::BI__builtin_neon_vmaxvq_u8: 2048 case AArch64::BI__builtin_neon_vmaxvq_u16: 2049 case AArch64::BI__builtin_neon_vmaxvq_u32: 2050 Int = Intrinsic::aarch64_neon_umaxv; 2051 AcrossVec = true; ExtendEle = false; s = "umaxv"; break; 2052 case AArch64::BI__builtin_neon_vminv_s8: 2053 case AArch64::BI__builtin_neon_vminv_s16: 2054 case AArch64::BI__builtin_neon_vminvq_s8: 2055 case AArch64::BI__builtin_neon_vminvq_s16: 2056 case AArch64::BI__builtin_neon_vminvq_s32: 2057 Int = Intrinsic::aarch64_neon_sminv; 2058 AcrossVec = true; ExtendEle = false; s = "sminv"; break; 2059 case AArch64::BI__builtin_neon_vminv_u8: 2060 case AArch64::BI__builtin_neon_vminv_u16: 2061 case AArch64::BI__builtin_neon_vminvq_u8: 2062 case AArch64::BI__builtin_neon_vminvq_u16: 2063 case AArch64::BI__builtin_neon_vminvq_u32: 2064 Int = Intrinsic::aarch64_neon_uminv; 2065 AcrossVec = true; ExtendEle = false; s = "uminv"; break; 2066 case AArch64::BI__builtin_neon_vaddv_s8: 2067 case AArch64::BI__builtin_neon_vaddv_s16: 2068 case AArch64::BI__builtin_neon_vaddvq_s8: 2069 case AArch64::BI__builtin_neon_vaddvq_s16: 2070 case AArch64::BI__builtin_neon_vaddvq_s32: 2071 case AArch64::BI__builtin_neon_vaddv_u8: 2072 case AArch64::BI__builtin_neon_vaddv_u16: 2073 case AArch64::BI__builtin_neon_vaddvq_u8: 2074 case AArch64::BI__builtin_neon_vaddvq_u16: 2075 case AArch64::BI__builtin_neon_vaddvq_u32: 2076 Int = 
  case AArch64::BI__builtin_neon_vaddv_s8:
  case AArch64::BI__builtin_neon_vaddv_s16:
  case AArch64::BI__builtin_neon_vaddvq_s8:
  case AArch64::BI__builtin_neon_vaddvq_s16:
  case AArch64::BI__builtin_neon_vaddvq_s32:
  case AArch64::BI__builtin_neon_vaddv_u8:
  case AArch64::BI__builtin_neon_vaddv_u16:
  case AArch64::BI__builtin_neon_vaddvq_u8:
  case AArch64::BI__builtin_neon_vaddvq_u16:
  case AArch64::BI__builtin_neon_vaddvq_u32:
    Int = Intrinsic::aarch64_neon_vaddv;
    AcrossVec = true; ExtendEle = false; s = "vaddv"; break;
  case AArch64::BI__builtin_neon_vmaxvq_f32:
    Int = Intrinsic::aarch64_neon_vmaxv;
    AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
  case AArch64::BI__builtin_neon_vminvq_f32:
    Int = Intrinsic::aarch64_neon_vminv;
    AcrossVec = true; ExtendEle = false; s = "vminv"; break;
  case AArch64::BI__builtin_neon_vmaxnmvq_f32:
    Int = Intrinsic::aarch64_neon_vmaxnmv;
    AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
  case AArch64::BI__builtin_neon_vminnmvq_f32:
    Int = Intrinsic::aarch64_neon_vminnmv;
    AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
  // Scalar Integer Saturating Doubling Multiply Half High
  case AArch64::BI__builtin_neon_vqdmulhh_s16:
  case AArch64::BI__builtin_neon_vqdmulhs_s32:
    Int = Intrinsic::arm_neon_vqdmulh;
    s = "vqdmulh"; OverloadInt = true; break;
  // Scalar Integer Saturating Rounding Doubling Multiply Half High
  case AArch64::BI__builtin_neon_vqrdmulhh_s16:
  case AArch64::BI__builtin_neon_vqrdmulhs_s32:
    Int = Intrinsic::arm_neon_vqrdmulh;
    s = "vqrdmulh"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Step
  case AArch64::BI__builtin_neon_vrecpss_f32:
  case AArch64::BI__builtin_neon_vrecpsd_f64:
    Int = Intrinsic::arm_neon_vrecps;
    s = "vrecps"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Square Root Step
  case AArch64::BI__builtin_neon_vrsqrtss_f32:
  case AArch64::BI__builtin_neon_vrsqrtsd_f64:
    Int = Intrinsic::arm_neon_vrsqrts;
    s = "vrsqrts"; OverloadInt = true; break;
  // Scalar Signed Integer Convert To Floating-point
  case AArch64::BI__builtin_neon_vcvts_f32_s32:
    Int = Intrinsic::aarch64_neon_vcvtf32_s32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_f64_s64:
    Int = Intrinsic::aarch64_neon_vcvtf64_s64;
    s = "vcvtf"; OverloadInt = false; break;
  // Scalar Unsigned Integer Convert To Floating-point
  case AArch64::BI__builtin_neon_vcvts_f32_u32:
    Int = Intrinsic::aarch64_neon_vcvtf32_u32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_f64_u64:
    Int = Intrinsic::aarch64_neon_vcvtf64_u64;
    s = "vcvtf"; OverloadInt = false; break;
  // Scalar Floating-point Reciprocal Estimate
  case AArch64::BI__builtin_neon_vrecpes_f32:
  case AArch64::BI__builtin_neon_vrecped_f64:
    Int = Intrinsic::arm_neon_vrecpe;
    s = "vrecpe"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Exponent
  case AArch64::BI__builtin_neon_vrecpxs_f32:
  case AArch64::BI__builtin_neon_vrecpxd_f64:
    Int = Intrinsic::aarch64_neon_vrecpx;
    s = "vrecpx"; OverloadInt = true; break;
  // Scalar Floating-point Reciprocal Square Root Estimate
  case AArch64::BI__builtin_neon_vrsqrtes_f32:
  case AArch64::BI__builtin_neon_vrsqrted_f64:
    Int = Intrinsic::arm_neon_vrsqrte;
    s = "vrsqrte"; OverloadInt = true; break;
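  // Illustrative note for the compare builtins below: OverloadCmpInt mangles
  // the intrinsic with one-element vector types for the result and both
  // operands, so vceqd_s64(a, b) becomes roughly
  //   %r = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(a, b)
  // and the "To Zero" variants simply push an implicit zero second operand.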
  // Scalar Compare Equal
  case AArch64::BI__builtin_neon_vceqd_s64:
  case AArch64::BI__builtin_neon_vceqd_u64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    OverloadCmpInt = true; break;
  // Scalar Compare Equal To Zero
  case AArch64::BI__builtin_neon_vceqzd_s64:
  case AArch64::BI__builtin_neon_vceqzd_u64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than or Equal
  case AArch64::BI__builtin_neon_vcged_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; break;
  case AArch64::BI__builtin_neon_vcged_u64:
    Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than or Equal To Zero
  case AArch64::BI__builtin_neon_vcgezd_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than
  case AArch64::BI__builtin_neon_vcgtd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; break;
  case AArch64::BI__builtin_neon_vcgtd_u64:
    Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
    OverloadCmpInt = true; break;
  // Scalar Compare Greater Than Zero
  case AArch64::BI__builtin_neon_vcgtzd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Less Than or Equal
  case AArch64::BI__builtin_neon_vcled_s64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  case AArch64::BI__builtin_neon_vcled_u64:
    Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Less Than or Equal To Zero
  case AArch64::BI__builtin_neon_vclezd_s64:
    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Compare Less Than
  case AArch64::BI__builtin_neon_vcltd_s64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  case AArch64::BI__builtin_neon_vcltd_u64:
    Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Less Than Zero
  case AArch64::BI__builtin_neon_vcltzd_s64:
    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Equal
  case AArch64::BI__builtin_neon_vceqs_f32:
  case AArch64::BI__builtin_neon_vceqd_f64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Equal To Zero
  case AArch64::BI__builtin_neon_vceqzs_f32:
  case AArch64::BI__builtin_neon_vceqzd_f64:
    Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Or Equal
  case AArch64::BI__builtin_neon_vcges_f32:
  case AArch64::BI__builtin_neon_vcged_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Or Equal To Zero
  case AArch64::BI__builtin_neon_vcgezs_f32:
  case AArch64::BI__builtin_neon_vcgezd_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than
  case AArch64::BI__builtin_neon_vcgts_f32:
  case AArch64::BI__builtin_neon_vcgtd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Greater Than Zero
  case AArch64::BI__builtin_neon_vcgtzs_f32:
  case AArch64::BI__builtin_neon_vcgtzd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Less Than or Equal
  case AArch64::BI__builtin_neon_vcles_f32:
  case AArch64::BI__builtin_neon_vcled_f64:
    Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
    // Lower as reversed-operand vcge, matching the integer vcled case above.
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Compare Less Than Or Equal To Zero
  case AArch64::BI__builtin_neon_vclezs_f32:
  case AArch64::BI__builtin_neon_vclezd_f64:
    Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Compare Less Than
  case AArch64::BI__builtin_neon_vclts_f32:
  case AArch64::BI__builtin_neon_vcltd_f64:
    Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Compare Less Than Zero
  case AArch64::BI__builtin_neon_vcltzs_f32:
  case AArch64::BI__builtin_neon_vcltzd_f64:
    Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
    // Add implicit zero operand.
    Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Greater Than Or Equal
  case AArch64::BI__builtin_neon_vcages_f32:
  case AArch64::BI__builtin_neon_vcaged_f64:
    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Greater Than
  case AArch64::BI__builtin_neon_vcagts_f32:
  case AArch64::BI__builtin_neon_vcagtd_f64:
    Int = Intrinsic::aarch64_neon_vcagt; s = "vcagt";
    OverloadCmpInt = true; break;
  // Scalar Floating-point Absolute Compare Less Than Or Equal
  case AArch64::BI__builtin_neon_vcales_f32:
  case AArch64::BI__builtin_neon_vcaled_f64:
    Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Floating-point Absolute Compare Less Than
  case AArch64::BI__builtin_neon_vcalts_f32:
  case AArch64::BI__builtin_neon_vcaltd_f64:
    Int = Intrinsic::aarch64_neon_vcagt; s = "vcalt";
    OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
  // Scalar Compare Bitwise Test Bits
  case AArch64::BI__builtin_neon_vtstd_s64:
  case AArch64::BI__builtin_neon_vtstd_u64:
    Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
    OverloadCmpInt = true; break;
  // Scalar Absolute Value
  case AArch64::BI__builtin_neon_vabsd_s64:
    Int = Intrinsic::aarch64_neon_vabs;
    s = "vabs"; OverloadInt = false; break;
  // Scalar Signed Saturating Absolute Value
  case AArch64::BI__builtin_neon_vqabsb_s8:
  case AArch64::BI__builtin_neon_vqabsh_s16:
  case AArch64::BI__builtin_neon_vqabss_s32:
  case AArch64::BI__builtin_neon_vqabsd_s64:
    Int = Intrinsic::arm_neon_vqabs;
    s = "vqabs"; OverloadInt = true; break;
  // Scalar Negate
  case AArch64::BI__builtin_neon_vnegd_s64:
    Int = Intrinsic::aarch64_neon_vneg;
    s = "vneg"; OverloadInt = false; break;
  // Scalar Signed Saturating Negate
  case AArch64::BI__builtin_neon_vqnegb_s8:
  case AArch64::BI__builtin_neon_vqnegh_s16:
  case AArch64::BI__builtin_neon_vqnegs_s32:
  case AArch64::BI__builtin_neon_vqnegd_s64:
    Int = Intrinsic::arm_neon_vqneg;
    s = "vqneg"; OverloadInt = true; break;
  // Scalar Signed Saturating Accumulated of Unsigned Value
  case AArch64::BI__builtin_neon_vuqaddb_s8:
  case AArch64::BI__builtin_neon_vuqaddh_s16:
  case AArch64::BI__builtin_neon_vuqadds_s32:
  case AArch64::BI__builtin_neon_vuqaddd_s64:
    Int = Intrinsic::aarch64_neon_vuqadd;
    s = "vuqadd"; OverloadInt = true; break;
  // Scalar Unsigned Saturating Accumulated of Signed Value
  case AArch64::BI__builtin_neon_vsqaddb_u8:
  case AArch64::BI__builtin_neon_vsqaddh_u16:
  case AArch64::BI__builtin_neon_vsqadds_u32:
  case AArch64::BI__builtin_neon_vsqaddd_u64:
    Int = Intrinsic::aarch64_neon_vsqadd;
    s = "vsqadd"; OverloadInt = true; break;
  // Signed Saturating Doubling Multiply-Add Long
  case AArch64::BI__builtin_neon_vqdmlalh_s16:
  case AArch64::BI__builtin_neon_vqdmlals_s32:
    Int = Intrinsic::aarch64_neon_vqdmlal;
    s = "vqdmlal"; OverloadWideInt = true; break;
  // Signed Saturating Doubling Multiply-Subtract Long
  case AArch64::BI__builtin_neon_vqdmlslh_s16:
  case AArch64::BI__builtin_neon_vqdmlsls_s32:
    Int = Intrinsic::aarch64_neon_vqdmlsl;
    s = "vqdmlsl"; OverloadWideInt = true; break;
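  // Illustrative note: OverloadWideInt and OverloadNarrowInt mangle on the
  // type of the last argument, widened or narrowed by one step in the
  // epilogue; e.g. for vqdmlalh_s16 (i16 multiplicands, i32 accumulator) the
  // overload type is the widened <1 x i32>, while the vqmovn family below
  // narrows instead.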
  // Signed Saturating Doubling Multiply Long
  case AArch64::BI__builtin_neon_vqdmullh_s16:
  case AArch64::BI__builtin_neon_vqdmulls_s32:
    Int = Intrinsic::arm_neon_vqdmull;
    s = "vqdmull"; OverloadWideInt = true; break;
  // Scalar Signed Saturating Extract Unsigned Narrow
  case AArch64::BI__builtin_neon_vqmovunh_s16:
  case AArch64::BI__builtin_neon_vqmovuns_s32:
  case AArch64::BI__builtin_neon_vqmovund_s64:
    Int = Intrinsic::arm_neon_vqmovnsu;
    s = "vqmovun"; OverloadNarrowInt = true; break;
  // Scalar Signed Saturating Extract Narrow
  case AArch64::BI__builtin_neon_vqmovnh_s16:
  case AArch64::BI__builtin_neon_vqmovns_s32:
  case AArch64::BI__builtin_neon_vqmovnd_s64:
    Int = Intrinsic::arm_neon_vqmovns;
    s = "vqmovn"; OverloadNarrowInt = true; break;
  // Scalar Unsigned Saturating Extract Narrow
  case AArch64::BI__builtin_neon_vqmovnh_u16:
  case AArch64::BI__builtin_neon_vqmovns_u32:
  case AArch64::BI__builtin_neon_vqmovnd_u64:
    Int = Intrinsic::arm_neon_vqmovnu;
    s = "vqmovn"; OverloadNarrowInt = true; break;
  // Scalar Signed Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vshrd_n_s64:
    Int = Intrinsic::aarch64_neon_vshrds_n;
    s = "vsshr"; OverloadInt = false; break;
  // Scalar Unsigned Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vshrd_n_u64:
    Int = Intrinsic::aarch64_neon_vshrdu_n;
    s = "vushr"; OverloadInt = false; break;
  // Scalar Signed Rounding Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vrshrd_n_s64:
    Int = Intrinsic::aarch64_neon_vsrshr;
    s = "vsrshr"; OverloadInt = true; break;
  // Scalar Unsigned Rounding Shift Right (Immediate)
  case AArch64::BI__builtin_neon_vrshrd_n_u64:
    Int = Intrinsic::aarch64_neon_vurshr;
    s = "vurshr"; OverloadInt = true; break;
  // Scalar Signed Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vsrad_n_s64:
    Int = Intrinsic::aarch64_neon_vsrads_n;
    s = "vssra"; OverloadInt = false; break;
  // Scalar Unsigned Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vsrad_n_u64:
    Int = Intrinsic::aarch64_neon_vsradu_n;
    s = "vusra"; OverloadInt = false; break;
  // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vrsrad_n_s64:
    Int = Intrinsic::aarch64_neon_vrsrads_n;
    s = "vsrsra"; OverloadInt = false; break;
  // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
  case AArch64::BI__builtin_neon_vrsrad_n_u64:
    Int = Intrinsic::aarch64_neon_vrsradu_n;
    s = "vursra"; OverloadInt = false; break;
  // Scalar Signed/Unsigned Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vshld_n_s64:
  case AArch64::BI__builtin_neon_vshld_n_u64:
    Int = Intrinsic::aarch64_neon_vshld_n;
    s = "vshl"; OverloadInt = false; break;
  // Signed Saturating Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vqshlb_n_s8:
  case AArch64::BI__builtin_neon_vqshlh_n_s16:
  case AArch64::BI__builtin_neon_vqshls_n_s32:
  case AArch64::BI__builtin_neon_vqshld_n_s64:
    Int = Intrinsic::aarch64_neon_vqshls_n;
    s = "vsqshl"; OverloadInt = true; break;
  // Unsigned Saturating Shift Left (Immediate)
  case AArch64::BI__builtin_neon_vqshlb_n_u8:
  case AArch64::BI__builtin_neon_vqshlh_n_u16:
  case AArch64::BI__builtin_neon_vqshls_n_u32:
  case AArch64::BI__builtin_neon_vqshld_n_u64:
    Int = Intrinsic::aarch64_neon_vqshlu_n;
    s = "vuqshl"; OverloadInt = true; break;
  // Signed Saturating Shift Left Unsigned (Immediate)
  case AArch64::BI__builtin_neon_vqshlub_n_s8:
  case AArch64::BI__builtin_neon_vqshluh_n_s16:
  case AArch64::BI__builtin_neon_vqshlus_n_s32:
  case AArch64::BI__builtin_neon_vqshlud_n_s64:
    Int = Intrinsic::aarch64_neon_vsqshlu;
    s = "vsqshlu"; OverloadInt = true; break;
  // Shift Right And Insert (Immediate)
  case AArch64::BI__builtin_neon_vsrid_n_s64:
  case AArch64::BI__builtin_neon_vsrid_n_u64:
    Int = Intrinsic::aarch64_neon_vsri;
    s = "vsri"; OverloadInt = true; break;
  // Shift Left And Insert (Immediate)
  case AArch64::BI__builtin_neon_vslid_n_s64:
  case AArch64::BI__builtin_neon_vslid_n_u64:
    Int = Intrinsic::aarch64_neon_vsli;
    s = "vsli"; OverloadInt = true; break;
  // Signed Saturating Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrnh_n_s16:
  case AArch64::BI__builtin_neon_vqshrns_n_s32:
  case AArch64::BI__builtin_neon_vqshrnd_n_s64:
    Int = Intrinsic::aarch64_neon_vsqshrn;
    s = "vsqshrn"; OverloadInt = true; break;
  // Unsigned Saturating Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrnh_n_u16:
  case AArch64::BI__builtin_neon_vqshrns_n_u32:
  case AArch64::BI__builtin_neon_vqshrnd_n_u64:
    Int = Intrinsic::aarch64_neon_vuqshrn;
    s = "vuqshrn"; OverloadInt = true; break;
  // Signed Saturating Rounded Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrnh_n_s16:
  case AArch64::BI__builtin_neon_vqrshrns_n_s32:
  case AArch64::BI__builtin_neon_vqrshrnd_n_s64:
    Int = Intrinsic::aarch64_neon_vsqrshrn;
    s = "vsqrshrn"; OverloadInt = true; break;
  // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrnh_n_u16:
  case AArch64::BI__builtin_neon_vqrshrns_n_u32:
  case AArch64::BI__builtin_neon_vqrshrnd_n_u64:
    Int = Intrinsic::aarch64_neon_vuqrshrn;
    s = "vuqrshrn"; OverloadInt = true; break;
  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqshrunh_n_s16:
  case AArch64::BI__builtin_neon_vqshruns_n_s32:
  case AArch64::BI__builtin_neon_vqshrund_n_s64:
    Int = Intrinsic::aarch64_neon_vsqshrun;
    s = "vsqshrun"; OverloadInt = true; break;
  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
  case AArch64::BI__builtin_neon_vqrshrunh_n_s16:
  case AArch64::BI__builtin_neon_vqrshruns_n_s32:
  case AArch64::BI__builtin_neon_vqrshrund_n_s64:
    Int = Intrinsic::aarch64_neon_vsqrshrun;
    s = "vsqrshrun"; OverloadInt = true; break;
  // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_f32_s32:
    Int = Intrinsic::aarch64_neon_vcvtf32_n_s32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_f64_s64:
    Int = Intrinsic::aarch64_neon_vcvtf64_n_s64;
    s = "vcvtf"; OverloadInt = false; break;
  // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_f32_u32:
    Int = Intrinsic::aarch64_neon_vcvtf32_n_u32;
    s = "vcvtf"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_f64_u64:
    Int = Intrinsic::aarch64_neon_vcvtf64_n_u64;
    s = "vcvtf"; OverloadInt = false; break;
  // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_s32_f32:
    Int = Intrinsic::aarch64_neon_vcvts_n_s32_f32;
    s = "fcvtzs"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_s64_f64:
    Int = Intrinsic::aarch64_neon_vcvtd_n_s64_f64;
    s = "fcvtzs"; OverloadInt = false; break;
  // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
  case AArch64::BI__builtin_neon_vcvts_n_u32_f32:
    Int = Intrinsic::aarch64_neon_vcvts_n_u32_f32;
    s = "fcvtzu"; OverloadInt = false; break;
  case AArch64::BI__builtin_neon_vcvtd_n_u64_f64:
    Int = Intrinsic::aarch64_neon_vcvtd_n_u64_f64;
    s = "fcvtzu"; OverloadInt = false; break;
  }

  if (!Int)
    return 0;

  // An AArch64 scalar builtin returns a scalar type, but it may be mapped to
  // an AArch64 intrinsic that returns a one-element vector type.
  Function *F = 0;
  if (AcrossVec) {
    // Get the argument type.
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    llvm::Type *ETy = VTy->getElementType();
    llvm::VectorType *RTy = llvm::VectorType::get(ETy, 1);

    if (ExtendEle) {
      assert(!ETy->isFloatingPointTy());
      RTy = llvm::VectorType::getExtendedElementVectorType(RTy);
    }

    llvm::Type *Tys[2] = {RTy, VTy};
    F = CGF.CGM.getIntrinsic(Int, Tys);
    assert(E->getNumArgs() == 1);
  } else if (OverloadInt) {
    // Determine the type of this overloaded AArch64 intrinsic.
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    assert(VTy);

    F = CGF.CGM.getIntrinsic(Int, VTy);
  } else if (OverloadWideInt || OverloadNarrowInt) {
    // Determine the type of this overloaded AArch64 intrinsic.
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    llvm::VectorType *RTy = OverloadWideInt ?
      llvm::VectorType::getExtendedElementVectorType(VTy) :
      llvm::VectorType::getTruncatedElementVectorType(VTy);
    F = CGF.CGM.getIntrinsic(Int, RTy);
  } else if (OverloadCmpInt) {
    // Determine the types of this overloaded AArch64 intrinsic.
    SmallVector<llvm::Type *, 3> Tys;
    const Expr *Arg = E->getArg(E->getNumArgs()-1);
    llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
    llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
    Tys.push_back(VTy);
    Ty = CGF.ConvertType(Arg->getType());
    VTy = llvm::VectorType::get(Ty, 1);
    Tys.push_back(VTy);
    Tys.push_back(VTy);

    F = CGF.CGM.getIntrinsic(Int, Tys);
  } else
    F = CGF.CGM.getIntrinsic(Int);

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  // Cast the AArch64 intrinsic's one-element vector result back to the
  // scalar type expected by the builtin.
  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}

Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
    Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
    const CmpInst::Predicate Ip, const Twine &Name) {
  llvm::Type *OTy = ((llvm::User *)Op)->getOperand(0)->getType();
  if (OTy->isPointerTy())
    OTy = Ty;
  Op = Builder.CreateBitCast(Op, OTy);
  if (((llvm::VectorType *)OTy)->getElementType()->isFloatingPointTy()) {
    Op = Builder.CreateFCmp(Fp, Op, ConstantAggregateZero::get(OTy));
  } else {
    Op = Builder.CreateICmp(Ip, Op, ConstantAggregateZero::get(OTy));
  }
  return Builder.CreateZExt(Op, Ty, Name);
}
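
// Illustrative example for packTBLDVectorList below: given two <8 x i8>
// table halves, the mask it builds is <0, 1, ..., 15>, so the shuffle
// concatenates each pair into a single <16 x i8> table register suitable for
// the AArch64 TBL/TBX intrinsics.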
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Value *ExtOp, Value *IndexOp,
                                 llvm::Type *ResTy, unsigned IntID,
                                 const char *Name) {
  SmallVector<Value *, 2> TblOps;
  if (ExtOp)
    TblOps.push_back(ExtOp);

  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
  SmallVector<Constant*, 16> Indices;
  llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
  for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
    Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
    Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
  }
  Value *SV = llvm::ConstantVector::get(Indices);

  int PairPos = 0, End = Ops.size() - 1;
  while (PairPos < End) {
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     Ops[PairPos+1], SV, Name));
    PairPos += 2;
  }

  // If there's an odd number of 64-bit lookup-table vectors, fill the high
  // 64 bits of the final 128-bit lookup table with zero.
  if (PairPos == End) {
    Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     ZeroTbl, SV, Name));
  }

  TblTy = llvm::VectorType::get(TblTy->getElementType(),
                                2*TblTy->getNumElements());
  llvm::Type *Tys[2] = { ResTy, TblTy };

  Function *TblF;
  TblOps.push_back(IndexOp);
  TblF = CGF.CGM.getIntrinsic(IntID, Tys);

  return CGF.EmitNeonCall(TblF, TblOps, Name);
}

static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
                                        unsigned BuiltinID,
                                        const CallExpr *E) {
  unsigned int Int = 0;
  const char *s = NULL;

  unsigned TblPos;
  switch (BuiltinID) {
  default:
    return 0;
  case AArch64::BI__builtin_neon_vtbl1_v:
  case AArch64::BI__builtin_neon_vqtbl1_v:
  case AArch64::BI__builtin_neon_vqtbl1q_v:
  case AArch64::BI__builtin_neon_vtbl2_v:
  case AArch64::BI__builtin_neon_vqtbl2_v:
  case AArch64::BI__builtin_neon_vqtbl2q_v:
  case AArch64::BI__builtin_neon_vtbl3_v:
  case AArch64::BI__builtin_neon_vqtbl3_v:
  case AArch64::BI__builtin_neon_vqtbl3q_v:
  case AArch64::BI__builtin_neon_vtbl4_v:
  case AArch64::BI__builtin_neon_vqtbl4_v:
  case AArch64::BI__builtin_neon_vqtbl4q_v:
    TblPos = 0;
    break;
  case AArch64::BI__builtin_neon_vtbx1_v:
  case AArch64::BI__builtin_neon_vqtbx1_v:
  case AArch64::BI__builtin_neon_vqtbx1q_v:
  case AArch64::BI__builtin_neon_vtbx2_v:
  case AArch64::BI__builtin_neon_vqtbx2_v:
  case AArch64::BI__builtin_neon_vqtbx2q_v:
  case AArch64::BI__builtin_neon_vtbx3_v:
  case AArch64::BI__builtin_neon_vqtbx3_v:
  case AArch64::BI__builtin_neon_vqtbx3q_v:
  case AArch64::BI__builtin_neon_vtbx4_v:
  case AArch64::BI__builtin_neon_vqtbx4_v:
  case AArch64::BI__builtin_neon_vqtbx4q_v:
    TblPos = 1;
    break;
  }

  assert(E->getNumArgs() >= 3);

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
    return 0;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(Result.getZExtValue());
  llvm::VectorType *VTy = GetNeonType(&CGF, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
  }

  Arg = E->getArg(TblPos);
  llvm::Type *TblTy = CGF.ConvertType(Arg->getType());
  llvm::VectorType *VTblTy = cast<llvm::VectorType>(TblTy);
  llvm::Type *Tys[2] = { Ty, VTblTy };
  unsigned nElts = VTy->getNumElements();

  // The TBL/TBX builtins take a varying number of table operands, so handle
  // each case separately.
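  // Illustrative example: vtbl2_v packs its two 64-bit table registers into
  // one 128-bit register via packTBLDVectorList and then lowers to the same
  // @llvm.aarch64.neon.vtbl1 call as vtbl1_v, whose single 64-bit table is
  // instead padded with a zero upper half.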
  SmallVector<Value *, 2> TblOps;
  switch (BuiltinID) {
  case AArch64::BI__builtin_neon_vtbl1_v: {
    TblOps.push_back(Ops[0]);
    return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
                              Intrinsic::aarch64_neon_vtbl1, "vtbl1");
  }
  case AArch64::BI__builtin_neon_vtbl2_v: {
    TblOps.push_back(Ops[0]);
    TblOps.push_back(Ops[1]);
    return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
                              Intrinsic::aarch64_neon_vtbl1, "vtbl1");
  }
  case AArch64::BI__builtin_neon_vtbl3_v: {
    TblOps.push_back(Ops[0]);
    TblOps.push_back(Ops[1]);
    TblOps.push_back(Ops[2]);
    return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
                              Intrinsic::aarch64_neon_vtbl2, "vtbl2");
  }
  case AArch64::BI__builtin_neon_vtbl4_v: {
    TblOps.push_back(Ops[0]);
    TblOps.push_back(Ops[1]);
    TblOps.push_back(Ops[2]);
    TblOps.push_back(Ops[3]);
    return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
                              Intrinsic::aarch64_neon_vtbl2, "vtbl2");
  }
  case AArch64::BI__builtin_neon_vtbx1_v: {
    TblOps.push_back(Ops[1]);
    Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
                                       Intrinsic::aarch64_neon_vtbl1, "vtbl1");

    llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
    Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
    Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
    CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);

    SmallVector<Value *, 4> BslOps;
    BslOps.push_back(CmpRes);
    BslOps.push_back(Ops[0]);
    BslOps.push_back(TblRes);
    Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
    return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
  }
  case AArch64::BI__builtin_neon_vtbx2_v: {
    TblOps.push_back(Ops[1]);
    TblOps.push_back(Ops[2]);
    return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
                              Intrinsic::aarch64_neon_vtbx1, "vtbx1");
  }
  case AArch64::BI__builtin_neon_vtbx3_v: {
    TblOps.push_back(Ops[1]);
    TblOps.push_back(Ops[2]);
    TblOps.push_back(Ops[3]);
    Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
                                       Intrinsic::aarch64_neon_vtbl2, "vtbl2");

    llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
    Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
    Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
                                           TwentyFourV);
    CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);

    SmallVector<Value *, 4> BslOps;
    BslOps.push_back(CmpRes);
    BslOps.push_back(Ops[0]);
    BslOps.push_back(TblRes);
    Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
    return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
  }
  case AArch64::BI__builtin_neon_vtbx4_v: {
    TblOps.push_back(Ops[1]);
    TblOps.push_back(Ops[2]);
    TblOps.push_back(Ops[3]);
    TblOps.push_back(Ops[4]);
    return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
                              Intrinsic::aarch64_neon_vtbx2, "vtbx2");
  }
  case AArch64::BI__builtin_neon_vqtbl1_v:
  case AArch64::BI__builtin_neon_vqtbl1q_v:
    Int = Intrinsic::aarch64_neon_vtbl1; s = "vtbl1"; break;
  case AArch64::BI__builtin_neon_vqtbl2_v:
  case AArch64::BI__builtin_neon_vqtbl2q_v:
    Int = Intrinsic::aarch64_neon_vtbl2; s = "vtbl2"; break;
  case AArch64::BI__builtin_neon_vqtbl3_v:
  case AArch64::BI__builtin_neon_vqtbl3q_v:
    Int = Intrinsic::aarch64_neon_vtbl3; s = "vtbl3"; break;
  case AArch64::BI__builtin_neon_vqtbl1_v:
  case AArch64::BI__builtin_neon_vqtbl1q_v:
    Int = Intrinsic::aarch64_neon_vtbl1; s = "vtbl1"; break;
  case AArch64::BI__builtin_neon_vqtbl2_v:
  case AArch64::BI__builtin_neon_vqtbl2q_v:
    Int = Intrinsic::aarch64_neon_vtbl2; s = "vtbl2"; break;
  case AArch64::BI__builtin_neon_vqtbl3_v:
  case AArch64::BI__builtin_neon_vqtbl3q_v:
    Int = Intrinsic::aarch64_neon_vtbl3; s = "vtbl3"; break;
  case AArch64::BI__builtin_neon_vqtbl4_v:
  case AArch64::BI__builtin_neon_vqtbl4q_v:
    Int = Intrinsic::aarch64_neon_vtbl4; s = "vtbl4"; break;
  case AArch64::BI__builtin_neon_vqtbx1_v:
  case AArch64::BI__builtin_neon_vqtbx1q_v:
    Int = Intrinsic::aarch64_neon_vtbx1; s = "vtbx1"; break;
  case AArch64::BI__builtin_neon_vqtbx2_v:
  case AArch64::BI__builtin_neon_vqtbx2q_v:
    Int = Intrinsic::aarch64_neon_vtbx2; s = "vtbx2"; break;
  case AArch64::BI__builtin_neon_vqtbx3_v:
  case AArch64::BI__builtin_neon_vqtbx3q_v:
    Int = Intrinsic::aarch64_neon_vtbx3; s = "vtbx3"; break;
  case AArch64::BI__builtin_neon_vqtbx4_v:
  case AArch64::BI__builtin_neon_vqtbx4q_v:
    Int = Intrinsic::aarch64_neon_vtbx4; s = "vtbx4"; break;
  }

  if (!Int)
    return 0;

  Function *F = CGF.CGM.getIntrinsic(Int, Tys);
  return CGF.EmitNeonCall(F, Ops, s);
}

Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  // Process AArch64 scalar builtins
  if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E))
    return Result;

  // Process AArch64 table lookup builtins
  if (Value *Result = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E))
    return Result;

  if (BuiltinID == AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 &&
           "Variadic __clear_cache slipped through on AArch64");

    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value *, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
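  // Collect the call operands. For the load/store builtins handled
  // specially below, the pointer argument is emitted with
  // EmitPointerWithAlignment so the known alignment can be passed on to
  // the generated memory operation; all other operands are plain scalars.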
  SmallVector<Value *, 4> Ops;
  llvm::Value *Align = 0; // Alignment for load/store
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case AArch64::BI__builtin_neon_vst1_x2_v:
      case AArch64::BI__builtin_neon_vst1q_x2_v:
      case AArch64::BI__builtin_neon_vst1_x3_v:
      case AArch64::BI__builtin_neon_vst1q_x3_v:
      case AArch64::BI__builtin_neon_vst1_x4_v:
      case AArch64::BI__builtin_neon_vst1q_x4_v:
      // Handle ld1/st1 lane in this function a little different from ARM.
      case AArch64::BI__builtin_neon_vld1_lane_v:
      case AArch64::BI__builtin_neon_vld1q_lane_v:
      case AArch64::BI__builtin_neon_vst1_lane_v:
      case AArch64::BI__builtin_neon_vst1q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value *, unsigned> Src =
            EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case AArch64::BI__builtin_neon_vld1_x2_v:
      case AArch64::BI__builtin_neon_vld1q_x2_v:
      case AArch64::BI__builtin_neon_vld1_x3_v:
      case AArch64::BI__builtin_neon_vld1q_x3_v:
      case AArch64::BI__builtin_neon_vld1_x4_v:
      case AArch64::BI__builtin_neon_vld1q_x4_v:
      // Handle ld1/st1 dup lane in this function a little different from ARM.
      case AArch64::BI__builtin_neon_vld2_dup_v:
      case AArch64::BI__builtin_neon_vld2q_dup_v:
      case AArch64::BI__builtin_neon_vld3_dup_v:
      case AArch64::BI__builtin_neon_vld3q_dup_v:
      case AArch64::BI__builtin_neon_vld4_dup_v:
      case AArch64::BI__builtin_neon_vld4q_dup_v:
      case AArch64::BI__builtin_neon_vld2_lane_v:
      case AArch64::BI__builtin_neon_vld2q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value *, unsigned> Src =
            EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
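  // Many of the cases below delegate to EmitARMBuiltinExpr: these builtins
  // share semantics with their ARM v7 counterparts, and the ARM path
  // re-derives the overload type from the call's trailing type-flag
  // argument, so the shared codegen remains correct for AArch64 types.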
  switch (BuiltinID) {
  default:
    return 0;

  // AArch64 builtins mapping to legacy ARM v7 builtins.
  // FIXME: the mapped builtins listed correspond to what has been tested
  // in aarch64-neon-intrinsics.c so far.
  case AArch64::BI__builtin_neon_vuzp_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzp_v, E);
  case AArch64::BI__builtin_neon_vuzpq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzpq_v, E);
  case AArch64::BI__builtin_neon_vzip_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzip_v, E);
  case AArch64::BI__builtin_neon_vzipq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzipq_v, E);
  case AArch64::BI__builtin_neon_vtrn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrn_v, E);
  case AArch64::BI__builtin_neon_vtrnq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrnq_v, E);
  case AArch64::BI__builtin_neon_vext_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vext_v, E);
  case AArch64::BI__builtin_neon_vextq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vextq_v, E);
  case AArch64::BI__builtin_neon_vmul_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E);
  case AArch64::BI__builtin_neon_vmulq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E);
  case AArch64::BI__builtin_neon_vabd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E);
  case AArch64::BI__builtin_neon_vabdq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E);
  case AArch64::BI__builtin_neon_vfma_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E);
  case AArch64::BI__builtin_neon_vfmaq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E);
  case AArch64::BI__builtin_neon_vbsl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E);
  case AArch64::BI__builtin_neon_vbslq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E);
  case AArch64::BI__builtin_neon_vrsqrts_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E);
  case AArch64::BI__builtin_neon_vrsqrtsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E);
  case AArch64::BI__builtin_neon_vrecps_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E);
  case AArch64::BI__builtin_neon_vrecpsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E);
  case AArch64::BI__builtin_neon_vcage_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E);
  case AArch64::BI__builtin_neon_vcale_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E);
  case AArch64::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
    // fall through
  case AArch64::BI__builtin_neon_vcageq_v: {
    // The ARM intrinsic only covers f32; f64 vectors need the AArch64 one.
    Function *F;
    if (VTy->getElementType()->isIntegerTy(64))
      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq);
    else
      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case AArch64::BI__builtin_neon_vcalt_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E);
  case AArch64::BI__builtin_neon_vcagt_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E);
  case AArch64::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    // fall through
  case AArch64::BI__builtin_neon_vcagtq_v: {
    Function *F;
    if (VTy->getElementType()->isIntegerTy(64))
      F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq);
    else
      F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case AArch64::BI__builtin_neon_vtst_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E);
  case AArch64::BI__builtin_neon_vtstq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E);
  case AArch64::BI__builtin_neon_vhadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E);
  case AArch64::BI__builtin_neon_vhaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E);
  case AArch64::BI__builtin_neon_vhsub_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E);
  case AArch64::BI__builtin_neon_vhsubq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E);
  case AArch64::BI__builtin_neon_vrhadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E);
  case AArch64::BI__builtin_neon_vrhaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E);
  case AArch64::BI__builtin_neon_vqadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E);
  case AArch64::BI__builtin_neon_vqaddq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E);
  case AArch64::BI__builtin_neon_vqsub_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E);
  case AArch64::BI__builtin_neon_vqsubq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E);
  case AArch64::BI__builtin_neon_vshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E);
  case AArch64::BI__builtin_neon_vshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E);
  case AArch64::BI__builtin_neon_vqshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E);
  case AArch64::BI__builtin_neon_vqshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E);
  case AArch64::BI__builtin_neon_vrshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E);
  case AArch64::BI__builtin_neon_vrshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E);
  case AArch64::BI__builtin_neon_vqrshl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E);
  case AArch64::BI__builtin_neon_vqrshlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E);
  case AArch64::BI__builtin_neon_vaddhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E);
  case AArch64::BI__builtin_neon_vraddhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E);
  case AArch64::BI__builtin_neon_vsubhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E);
  case AArch64::BI__builtin_neon_vrsubhn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E);
  case AArch64::BI__builtin_neon_vmull_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E);
  case AArch64::BI__builtin_neon_vqdmull_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E);
  case AArch64::BI__builtin_neon_vqdmlal_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E);
  case AArch64::BI__builtin_neon_vqdmlsl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E);
  case AArch64::BI__builtin_neon_vmax_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E);
  case AArch64::BI__builtin_neon_vmaxq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E);
  case AArch64::BI__builtin_neon_vmin_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E);
  case AArch64::BI__builtin_neon_vminq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E);
  case AArch64::BI__builtin_neon_vpmax_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E);
  case AArch64::BI__builtin_neon_vpmin_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E);
  case AArch64::BI__builtin_neon_vpadd_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E);
  case AArch64::BI__builtin_neon_vqdmulh_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E);
  case AArch64::BI__builtin_neon_vqdmulhq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E);
  case AArch64::BI__builtin_neon_vqrdmulh_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E);
  case AArch64::BI__builtin_neon_vqrdmulhq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E);

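  // Most immediate shifts also delegate to the ARM path; the rounding
  // shifts and the 64-bit element accumulating variants below are emitted
  // through AArch64-specific intrinsics instead.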
  // Shift by immediate
  case AArch64::BI__builtin_neon_vshr_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
  case AArch64::BI__builtin_neon_vshrq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
  case AArch64::BI__builtin_neon_vrshr_n_v:
  case AArch64::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vurshr
               : Intrinsic::aarch64_neon_vsrshr;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
  case AArch64::BI__builtin_neon_vsra_n_v:
    if (VTy->getElementType()->isIntegerTy(64)) {
      Int = usgn ? Intrinsic::aarch64_neon_vsradu_n
                 : Intrinsic::aarch64_neon_vsrads_n;
      return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vsra_n");
    }
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
  case AArch64::BI__builtin_neon_vsraq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
  case AArch64::BI__builtin_neon_vrsra_n_v:
    if (VTy->getElementType()->isIntegerTy(64)) {
      Int = usgn ? Intrinsic::aarch64_neon_vrsradu_n
                 : Intrinsic::aarch64_neon_vrsrads_n;
      return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vrsra_n");
    }
    // fall through
  case AArch64::BI__builtin_neon_vrsraq_n_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Int = usgn ? Intrinsic::aarch64_neon_vurshr
               : Intrinsic::aarch64_neon_vsrshr;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  }
  case AArch64::BI__builtin_neon_vshl_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
  case AArch64::BI__builtin_neon_vshlq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
  case AArch64::BI__builtin_neon_vqshl_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
  case AArch64::BI__builtin_neon_vqshlq_n_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E);
  case AArch64::BI__builtin_neon_vqshlu_n_v:
  case AArch64::BI__builtin_neon_vqshluq_n_v:
    Int = Intrinsic::aarch64_neon_vsqshlu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n");
  case AArch64::BI__builtin_neon_vsri_n_v:
  case AArch64::BI__builtin_neon_vsriq_n_v:
    Int = Intrinsic::aarch64_neon_vsri;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n");
  case AArch64::BI__builtin_neon_vsli_n_v:
  case AArch64::BI__builtin_neon_vsliq_n_v:
    Int = Intrinsic::aarch64_neon_vsli;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n");
  case AArch64::BI__builtin_neon_vshll_n_v: {
    llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
  case AArch64::BI__builtin_neon_vshrn_n_v: {
    llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
    if (usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
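  // Saturating and/or rounding narrowing shifts map onto dedicated AArch64
  // intrinsics; plain vshrn_n above is the only variant simple enough to
  // open-code as a shift on the wide type followed by a truncate.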
  case AArch64::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_vsqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case AArch64::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_vrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case AArch64::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_vsqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case AArch64::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vuqshrn
               : Intrinsic::aarch64_neon_vsqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case AArch64::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn
               : Intrinsic::aarch64_neon_vsqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");

  // Convert
  case AArch64::BI__builtin_neon_vmovl_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E);
  case AArch64::BI__builtin_neon_vcvt_n_f32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_f32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *FloatTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case AArch64::BI__builtin_neon_vcvt_n_s32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_s32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E);
  case AArch64::BI__builtin_neon_vcvt_n_u32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_u32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_n_s64_v:
  case AArch64::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *FloatTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
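  // Whole-vector loads and stores are shared with ARM; the vld1_xN/vst1_xN
  // multi-vector forms and the lane/dup forms that need AArch64-specific
  // handling follow them.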
  // Load/Store
  case AArch64::BI__builtin_neon_vld1_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E);
  case AArch64::BI__builtin_neon_vld1q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E);
  case AArch64::BI__builtin_neon_vld2_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E);
  case AArch64::BI__builtin_neon_vld2q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E);
  case AArch64::BI__builtin_neon_vld3_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E);
  case AArch64::BI__builtin_neon_vld3q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E);
  case AArch64::BI__builtin_neon_vld4_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E);
  case AArch64::BI__builtin_neon_vld4q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E);
  case AArch64::BI__builtin_neon_vst1_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E);
  case AArch64::BI__builtin_neon_vst1q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E);
  case AArch64::BI__builtin_neon_vst2_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E);
  case AArch64::BI__builtin_neon_vst2q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E);
  case AArch64::BI__builtin_neon_vst3_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E);
  case AArch64::BI__builtin_neon_vst3q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E);
  case AArch64::BI__builtin_neon_vst4_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E);
  case AArch64::BI__builtin_neon_vst4q_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E);
  case AArch64::BI__builtin_neon_vld1_x2_v:
  case AArch64::BI__builtin_neon_vld1q_x2_v:
  case AArch64::BI__builtin_neon_vld1_x3_v:
  case AArch64::BI__builtin_neon_vld1q_x3_v:
  case AArch64::BI__builtin_neon_vld1_x4_v:
  case AArch64::BI__builtin_neon_vld1q_x4_v: {
    unsigned Int;
    switch (BuiltinID) {
    case AArch64::BI__builtin_neon_vld1_x2_v:
    case AArch64::BI__builtin_neon_vld1q_x2_v:
      Int = Intrinsic::aarch64_neon_vld1x2;
      break;
    case AArch64::BI__builtin_neon_vld1_x3_v:
    case AArch64::BI__builtin_neon_vld1q_x3_v:
      Int = Intrinsic::aarch64_neon_vld1x3;
      break;
    case AArch64::BI__builtin_neon_vld1_x4_v:
    case AArch64::BI__builtin_neon_vld1q_x4_v:
      Int = Intrinsic::aarch64_neon_vld1x4;
      break;
    }
    // The intrinsic returns a struct of vectors; store it whole through the
    // sret pointer passed as the first operand.
    Function *F = CGM.getIntrinsic(Int, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld1xN");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vst1_x2_v:
  case AArch64::BI__builtin_neon_vst1q_x2_v:
  case AArch64::BI__builtin_neon_vst1_x3_v:
  case AArch64::BI__builtin_neon_vst1q_x3_v:
  case AArch64::BI__builtin_neon_vst1_x4_v:
  case AArch64::BI__builtin_neon_vst1q_x4_v: {
    Ops.push_back(Align);
    unsigned Int;
    switch (BuiltinID) {
    case AArch64::BI__builtin_neon_vst1_x2_v:
    case AArch64::BI__builtin_neon_vst1q_x2_v:
      Int = Intrinsic::aarch64_neon_vst1x2;
      break;
    case AArch64::BI__builtin_neon_vst1_x3_v:
    case AArch64::BI__builtin_neon_vst1q_x3_v:
      Int = Intrinsic::aarch64_neon_vst1x3;
      break;
    case AArch64::BI__builtin_neon_vst1_x4_v:
    case AArch64::BI__builtin_neon_vst1q_x4_v:
      Int = Intrinsic::aarch64_neon_vst1x4;
      break;
    }
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
  }
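  // vld1_lane/vst1_lane are expanded inline (a scalar load or store plus
  // insertelement/extractelement) rather than delegated, so the alignment
  // gathered earlier can be attached directly to the memory instruction.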
  case AArch64::BI__builtin_neon_vld1_lane_v:
  case AArch64::BI__builtin_neon_vld1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case AArch64::BI__builtin_neon_vld2_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_lane_v, E);
  case AArch64::BI__builtin_neon_vld2q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
  case AArch64::BI__builtin_neon_vld3_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_lane_v, E);
  case AArch64::BI__builtin_neon_vld3q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_lane_v, E);
  case AArch64::BI__builtin_neon_vld4_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_lane_v, E);
  case AArch64::BI__builtin_neon_vld4q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_lane_v, E);
  case AArch64::BI__builtin_neon_vst1_lane_v:
  case AArch64::BI__builtin_neon_vst1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St =
        Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case AArch64::BI__builtin_neon_vst2_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_lane_v, E);
  case AArch64::BI__builtin_neon_vst2q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_lane_v, E);
  case AArch64::BI__builtin_neon_vst3_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_lane_v, E);
  case AArch64::BI__builtin_neon_vst3q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_lane_v, E);
  case AArch64::BI__builtin_neon_vst4_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_lane_v, E);
  case AArch64::BI__builtin_neon_vst4q_lane_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_lane_v, E);
  case AArch64::BI__builtin_neon_vld1_dup_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_dup_v, E);
  case AArch64::BI__builtin_neon_vld1q_dup_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_dup_v, E);
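  // vldN_dup is emulated: load one element group with the corresponding
  // lane intrinsic (into undef vectors at lane 0), then splat lane 0
  // across each returned vector. The 64-bit x 1 case below skips the
  // splat, since a single-element vector has nothing to duplicate.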
  case AArch64::BI__builtin_neon_vld2_dup_v:
  case AArch64::BI__builtin_neon_vld2q_dup_v:
  case AArch64::BI__builtin_neon_vld3_dup_v:
  case AArch64::BI__builtin_neon_vld3q_dup_v:
  case AArch64::BI__builtin_neon_vld4_dup_v:
  case AArch64::BI__builtin_neon_vld4q_dup_v: {
    // Handle 64-bit x 1 elements as a special-case. There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64 &&
        VTy->getNumElements() == 1) {
      switch (BuiltinID) {
      case AArch64::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case AArch64::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case AArch64::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default:
        llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case AArch64::BI__builtin_neon_vld2_dup_v:
    case AArch64::BI__builtin_neon_vld2q_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case AArch64::BI__builtin_neon_vld3_dup_v:
    case AArch64::BI__builtin_neon_vld3q_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case AArch64::BI__builtin_neon_vld4_dup_v:
    case AArch64::BI__builtin_neon_vld4q_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value *, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // splat lane 0 to all elts in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
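  // Crypto (AES/SHA) builtins are emitted through the shared arm_neon_*
  // crypto intrinsics.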
  // Crypto
  case AArch64::BI__builtin_neon_vaeseq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese, Ty),
                        Ops, "aese");
  case AArch64::BI__builtin_neon_vaesdq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesd, Ty),
                        Ops, "aesd");
  case AArch64::BI__builtin_neon_vaesmcq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesmc, Ty),
                        Ops, "aesmc");
  case AArch64::BI__builtin_neon_vaesimcq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesimc, Ty),
                        Ops, "aesimc");
  case AArch64::BI__builtin_neon_vsha1su1q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su1, Ty),
                        Ops, "sha1su1");
  case AArch64::BI__builtin_neon_vsha256su0q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su0, Ty),
                        Ops, "sha256su0");
  case AArch64::BI__builtin_neon_vsha1su0q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su0, Ty),
                        Ops, "sha1su0");
  case AArch64::BI__builtin_neon_vsha256hq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h, Ty),
                        Ops, "sha256h");
  case AArch64::BI__builtin_neon_vsha256h2q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h2, Ty),
                        Ops, "sha256h2");
  case AArch64::BI__builtin_neon_vsha256su1q_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
                        Ops, "sha256su1");
  case AArch64::BI__builtin_neon_vmul_lane_v:
  case AArch64::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == AArch64::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::Type *VTy = GetNeonType(this,
        NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }

  // AArch64-only builtins
  case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfmaq_lane_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() / 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfma_lane_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
          NeonTypeFlags(NeonTypeFlags::Float64, false, false));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
      return Builder.CreateBitCast(Result, Ty);
    }
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::Type *VTy = GetNeonType(this,
          NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
      Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
      return Builder.CreateBitCast(Result, Ty);
    }
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
                                            VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vfms_v:
  case AArch64::BI__builtin_neon_vfmsq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateFNeg(Ops[1]);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // LLVM's fma intrinsic puts the accumulator in the last position, but the
    // AArch64 intrinsic has it first.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case AArch64::BI__builtin_neon_vmaxnm_v:
  case AArch64::BI__builtin_neon_vmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_vmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  }
  case AArch64::BI__builtin_neon_vminnm_v:
  case AArch64::BI__builtin_neon_vminnmq_v: {
    Int = Intrinsic::aarch64_neon_vminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  }
  case AArch64::BI__builtin_neon_vpmaxnm_v:
  case AArch64::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_vpmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case AArch64::BI__builtin_neon_vpminnm_v:
  case AArch64::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_vpminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
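  // The 128-bit pairwise operations below reuse the overloaded ARM
  // intrinsics, instantiated at the wider vector type.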
  case AArch64::BI__builtin_neon_vpmaxq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  }
  case AArch64::BI__builtin_neon_vpminq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  }
  case AArch64::BI__builtin_neon_vpaddq_v: {
    Int = Intrinsic::arm_neon_vpadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd");
  }
  case AArch64::BI__builtin_neon_vmulx_v:
  case AArch64::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_vmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case AArch64::BI__builtin_neon_vpaddl_v:
  case AArch64::BI__builtin_neon_vpaddlq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpaddl_v, E);
  case AArch64::BI__builtin_neon_vpadal_v:
  case AArch64::BI__builtin_neon_vpadalq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadal_v, E);
  case AArch64::BI__builtin_neon_vqabs_v:
  case AArch64::BI__builtin_neon_vqabsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqabs_v, E);
  case AArch64::BI__builtin_neon_vqneg_v:
  case AArch64::BI__builtin_neon_vqnegq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqneg_v, E);
  case AArch64::BI__builtin_neon_vabs_v:
  case AArch64::BI__builtin_neon_vabsq_v: {
    if (VTy->getElementType()->isFloatingPointTy()) {
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    }
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabs_v, E);
  }
  case AArch64::BI__builtin_neon_vsqadd_v:
  case AArch64::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case AArch64::BI__builtin_neon_vuqadd_v:
  case AArch64::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
  case AArch64::BI__builtin_neon_vcls_v:
  case AArch64::BI__builtin_neon_vclsq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcls_v, E);
  case AArch64::BI__builtin_neon_vclz_v:
  case AArch64::BI__builtin_neon_vclzq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vclz_v, E);
  case AArch64::BI__builtin_neon_vcnt_v:
  case AArch64::BI__builtin_neon_vcntq_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcnt_v, E);
  case AArch64::BI__builtin_neon_vrbit_v:
  case AArch64::BI__builtin_neon_vrbitq_v:
    Int = Intrinsic::aarch64_neon_rbit;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  case AArch64::BI__builtin_neon_vmovn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovn_v, E);
  case AArch64::BI__builtin_neon_vqmovun_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovun_v, E);
  case AArch64::BI__builtin_neon_vqmovn_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovn_v, E);
  case AArch64::BI__builtin_neon_vcvt_f16_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f16_v, E);
  case AArch64::BI__builtin_neon_vcvt_f32_f16:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_f16, E);
  case AArch64::BI__builtin_neon_vcvt_f32_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this,
                     NeonTypeFlags(NeonTypeFlags::Float32, false, false));
    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
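  // vcvtx narrows f64 to f32 with FCVTXN's round-to-odd mode, which a
  // plain fptrunc (round-to-nearest) cannot express.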
  case AArch64::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *EltTy = FloatTy;
    llvm::Type *ResTy = llvm::VectorType::get(EltTy, 2);
    llvm::Type *Tys[2] = { ResTy, Ty };
    Int = Intrinsic::aarch64_neon_fcvtxn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtx_f32_f64");
  }
  case AArch64::BI__builtin_neon_vcvt_f64_v: {
    llvm::Type *OpTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
    Ops[0] = Builder.CreateBitCast(Ops[0], OpTy);
    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case AArch64::BI__builtin_neon_vcvtq_f64_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  }
  case AArch64::BI__builtin_neon_vrndn_v:
  case AArch64::BI__builtin_neon_vrndnq_v: {
    Int = Intrinsic::aarch64_neon_frintn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case AArch64::BI__builtin_neon_vrnda_v:
  case AArch64::BI__builtin_neon_vrndaq_v: {
    Int = Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case AArch64::BI__builtin_neon_vrndp_v:
  case AArch64::BI__builtin_neon_vrndpq_v: {
    Int = Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case AArch64::BI__builtin_neon_vrndm_v:
  case AArch64::BI__builtin_neon_vrndmq_v: {
    Int = Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case AArch64::BI__builtin_neon_vrndx_v:
  case AArch64::BI__builtin_neon_vrndxq_v: {
    Int = Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case AArch64::BI__builtin_neon_vrnd_v:
  case AArch64::BI__builtin_neon_vrndq_v: {
    Int = Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd");
  }
  case AArch64::BI__builtin_neon_vrndi_v:
  case AArch64::BI__builtin_neon_vrndiq_v: {
    Int = Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
  }
  case AArch64::BI__builtin_neon_vcvt_s32_v:
  case AArch64::BI__builtin_neon_vcvt_u32_v:
  case AArch64::BI__builtin_neon_vcvtq_s32_v:
  case AArch64::BI__builtin_neon_vcvtq_u32_v:
    return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_u32_v, E);
  case AArch64::BI__builtin_neon_vcvtq_s64_v:
  case AArch64::BI__builtin_neon_vcvtq_u64_v: {
    llvm::Type *DoubleTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
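  // FP-to-integer conversions with an explicit rounding mode: vcvtn (to
  // nearest, ties to even), vcvtp (towards +infinity), vcvtm (towards
  // -infinity) and vcvta (to nearest, ties away from zero) map to the
  // corresponding FCVT* intrinsic, overloaded on both the result and the
  // source vector type.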
  case AArch64::BI__builtin_neon_vcvtn_s32_v:
  case AArch64::BI__builtin_neon_vcvtnq_s32_v: {
    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f32");
  }
  case AArch64::BI__builtin_neon_vcvtnq_s64_v: {
    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f64");
  }
  case AArch64::BI__builtin_neon_vcvtn_u32_v:
  case AArch64::BI__builtin_neon_vcvtnq_u32_v: {
    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtnu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f32");
  }
  case AArch64::BI__builtin_neon_vcvtnq_u64_v: {
    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtnu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f64");
  }
  case AArch64::BI__builtin_neon_vcvtp_s32_v:
  case AArch64::BI__builtin_neon_vcvtpq_s32_v: {
    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtps;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f32");
  }
  case AArch64::BI__builtin_neon_vcvtpq_s64_v: {
    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtps;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f64");
  }
  case AArch64::BI__builtin_neon_vcvtp_u32_v:
  case AArch64::BI__builtin_neon_vcvtpq_u32_v: {
    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtpu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f32");
  }
  case AArch64::BI__builtin_neon_vcvtpq_u64_v: {
    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtpu;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f64");
  }
  case AArch64::BI__builtin_neon_vcvtm_s32_v:
  case AArch64::BI__builtin_neon_vcvtmq_s32_v: {
    llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtms;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f32");
  }
  case AArch64::BI__builtin_neon_vcvtmq_s64_v: {
    llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
    llvm::Type *Tys[2] = { Ty, OpTy };
    Int = Intrinsic::aarch64_neon_fcvtms;
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f64");
  }
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f32"); 3730 } 3731 case AArch64::BI__builtin_neon_vcvtmq_u64_v: { 3732 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements()); 3733 llvm::Type *Tys[2] = { Ty, OpTy }; 3734 Int = Intrinsic::aarch64_neon_fcvtmu; 3735 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f64"); 3736 } 3737 case AArch64::BI__builtin_neon_vcvta_s32_v: 3738 case AArch64::BI__builtin_neon_vcvtaq_s32_v: { 3739 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements()); 3740 llvm::Type *Tys[2] = { Ty, OpTy }; 3741 Int = Intrinsic::aarch64_neon_fcvtas; 3742 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f32"); 3743 } 3744 case AArch64::BI__builtin_neon_vcvtaq_s64_v: { 3745 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements()); 3746 llvm::Type *Tys[2] = { Ty, OpTy }; 3747 Int = Intrinsic::aarch64_neon_fcvtas; 3748 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f64"); 3749 } 3750 case AArch64::BI__builtin_neon_vcvta_u32_v: 3751 case AArch64::BI__builtin_neon_vcvtaq_u32_v: { 3752 llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements()); 3753 llvm::Type *Tys[2] = { Ty, OpTy }; 3754 Int = Intrinsic::aarch64_neon_fcvtau; 3755 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f32"); 3756 } 3757 case AArch64::BI__builtin_neon_vcvtaq_u64_v: { 3758 llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements()); 3759 llvm::Type *Tys[2] = { Ty, OpTy }; 3760 Int = Intrinsic::aarch64_neon_fcvtau; 3761 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f64"); 3762 } 3763 case AArch64::BI__builtin_neon_vrecpe_v: 3764 case AArch64::BI__builtin_neon_vrecpeq_v: 3765 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpe_v, E); 3766 case AArch64::BI__builtin_neon_vrsqrte_v: 3767 case AArch64::BI__builtin_neon_vrsqrteq_v: 3768 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrte_v, E); 3769 case AArch64::BI__builtin_neon_vsqrt_v: 3770 case AArch64::BI__builtin_neon_vsqrtq_v: { 3771 Int = Intrinsic::aarch64_neon_fsqrt; 3772 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); 3773 } 3774 case AArch64::BI__builtin_neon_vcvt_f32_v: 3775 case AArch64::BI__builtin_neon_vcvtq_f32_v: 3776 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_v, E); 3777 case AArch64::BI__builtin_neon_vceqz_v: 3778 case AArch64::BI__builtin_neon_vceqzq_v: 3779 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, 3780 ICmpInst::ICMP_EQ, "vceqz"); 3781 case AArch64::BI__builtin_neon_vcgez_v: 3782 case AArch64::BI__builtin_neon_vcgezq_v: 3783 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, 3784 ICmpInst::ICMP_SGE, "vcgez"); 3785 case AArch64::BI__builtin_neon_vclez_v: 3786 case AArch64::BI__builtin_neon_vclezq_v: 3787 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, 3788 ICmpInst::ICMP_SLE, "vclez"); 3789 case AArch64::BI__builtin_neon_vcgtz_v: 3790 case AArch64::BI__builtin_neon_vcgtzq_v: 3791 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, 3792 ICmpInst::ICMP_SGT, "vcgtz"); 3793 case AArch64::BI__builtin_neon_vcltz_v: 3794 case AArch64::BI__builtin_neon_vcltzq_v: 3795 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, 3796 ICmpInst::ICMP_SLT, "vcltz"); 3797 } 3798} 3799 3800Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, 3801 const CallExpr *E) { 3802 if (BuiltinID == ARM::BI__clear_cache) { 3803 assert(E->getNumArgs() == 2 && "__clear_cache 
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value *, 2> Ops;
    for (unsigned i = 0; i < 2; i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
      (BuiltinID == ARM::BI__builtin_arm_ldrex &&
       getContext().getTypeSize(E->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldrexd");

    // Combine the two returned words into a single 64-bit result.
    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }

  if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
                                                  getContext().getTypeSize(Ty));
    LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());

    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
    Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);
    else {
      Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
      return Builder.CreateBitCast(Val, RealResTy);
    }
  }
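  // strexd takes the 64-bit value as two i32 words: spill the value to a
  // temporary, reload it as a { i32, i32 } pair, and pass both words along
  // with the target address.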
  if (BuiltinID == ARM::BI__builtin_arm_strexd ||
      (BuiltinID == ARM::BI__builtin_arm_strex &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp,
                                         llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
                                         Int8PtrTy);
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }

  if (BuiltinID == ARM::BI__builtin_arm_strex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
                                                 getContext().getTypeSize(Ty));
    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }

    Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
    return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
  }

  if (BuiltinID == ARM::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == ARM::BI__builtin_arm_sevl) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_sevl);
    return Builder.CreateCall(F);
  }

  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case ARM::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::arm_crc32b; break;
  case ARM::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
  case ARM::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::arm_crc32h; break;
  case ARM::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
  case ARM::BI__builtin_arm_crc32w:
  case ARM::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::arm_crc32w; break;
  case ARM::BI__builtin_arm_crc32cw:
  case ARM::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));

    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics, hence we need different codegen for these cases.
    if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
        BuiltinID == ARM::BI__builtin_arm_crc32cd) {
      Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
      Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
      Value *Arg1b = Builder.CreateLShr(Arg1, C1);
      Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
      return Builder.CreateCall2(F, Res, Arg1b);
    } else {
      Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      return Builder.CreateCall2(F, Arg0, Arg1);
    }
  }
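  // The remaining builtins are NEON. As on the AArch64 path above, gather
  // the operands first, using EmitPointerWithAlignment for the pointer
  // operand of the vldN/vstN builtins so the alignment can be forwarded.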
  SmallVector<Value *, 4> Ops;
  llvm::Value *Align = 0;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld1_v:
      case ARM::BI__builtin_neon_vld1q_v:
      case ARM::BI__builtin_neon_vld1q_lane_v:
      case ARM::BI__builtin_neon_vld1_lane_v:
      case ARM::BI__builtin_neon_vld1_dup_v:
      case ARM::BI__builtin_neon_vld1q_dup_v:
      case ARM::BI__builtin_neon_vst1_v:
      case ARM::BI__builtin_neon_vst1q_v:
      case ARM::BI__builtin_neon_vst1q_lane_v:
      case ARM::BI__builtin_neon_vst1_lane_v:
      case ARM::BI__builtin_neon_vst2_v:
      case ARM::BI__builtin_neon_vst2q_v:
      case ARM::BI__builtin_neon_vst2_lane_v:
      case ARM::BI__builtin_neon_vst2q_lane_v:
      case ARM::BI__builtin_neon_vst3_v:
      case ARM::BI__builtin_neon_vst3q_v:
      case ARM::BI__builtin_neon_vst3_lane_v:
      case ARM::BI__builtin_neon_vst3q_lane_v:
      case ARM::BI__builtin_neon_vst4_v:
      case ARM::BI__builtin_neon_vst4q_v:
      case ARM::BI__builtin_neon_vst4_lane_v:
      case ARM::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value *, unsigned> Src =
            EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_v:
      case ARM::BI__builtin_neon_vld2q_v:
      case ARM::BI__builtin_neon_vld3_v:
      case ARM::BI__builtin_neon_vld3q_v:
      case ARM::BI__builtin_neon_vld4_v:
      case ARM::BI__builtin_neon_vld4q_v:
      case ARM::BI__builtin_neon_vld2_lane_v:
      case ARM::BI__builtin_neon_vld2q_lane_v:
      case ARM::BI__builtin_neon_vld3_lane_v:
      case ARM::BI__builtin_neon_vld3q_lane_v:
      case ARM::BI__builtin_neon_vld4_lane_v:
      case ARM::BI__builtin_neon_vld4q_lane_v:
      case ARM::BI__builtin_neon_vld2_dup_v:
      case ARM::BI__builtin_neon_vld3_dup_v:
      case ARM::BI__builtin_neon_vld4_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value *, unsigned> Src =
            EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }

  // vget_lane and vset_lane are not overloaded and do not have an extra
  // argument that specifies the vector type.
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  }

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();
  bool rightShift = false;

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vbsl_v:
  case ARM::BI__builtin_neon_vbslq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
                        Ops, "vbsl");
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v: {
    llvm::VectorType *SrcTy =
      llvm::VectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    // Generate target-independent intrinsic; also need to add second argument
    // for whether or not clz of zero is undefined; on ARM it isn't.
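    // E.g. vclzq_s32 is expected to lower to roughly
    //   %vclz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
    // (value names are illustrative only).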
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    // Generate target-independent intrinsic.
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
    return EmitNeonCall(F, Ops, "vctpop");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
      Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
      // Combine them.
      SmallVector<Constant*, 2> Indices;
      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
      SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
    }
    // fall through
  case ARM::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
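  // The vldN_lane builtins also carry the current register values and the
  // lane number, so a hypothetical vld2_lane_s16 is expected to emit roughly
  //   call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(
  //       i8* %p, <4 x i16> %a, <4 x i16> %b, i32 %lane, i32 %align)
  // (an illustrative sketch of the cases below).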
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special-case.  There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // Splat lane 0 to all elts in each vector of the result.
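    // E.g. for a hypothetical vld2_dup_s16, each extracted <4 x i16> value
    // is expected to be splatted roughly as
    //   %dup = shufflevector <4 x i16> %v, <4 x i16> undef,
    //                        <4 x i32> zeroinitializer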
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case ARM::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement this leads to bad CodeGen. So we need an
    // intrinsic for now.
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case ARM::BI__builtin_neon_vfma_v:
  case ARM::BI__builtin_neon_vfmaq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
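    // E.g. a hypothetical vpadal_s8 accumulates an <8 x i8> source into a
    // <4 x i16> result, so NarrowTy below becomes <8 x i8> while Ty stays
    // <4 x i16>.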
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
  }
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
  case ARM::BI__builtin_neon_vqdmlal_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlal");

    SmallVector<Value *, 2> AddOps;
    AddOps.push_back(Ops[0]);
    AddOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
                        AddOps, "vqdmlal");
  }
  case ARM::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlsl");

    SmallVector<Value *, 2> SubOps;
    SubOps.push_back(Ops[0]);
    SubOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
                        SubOps, "vqdmlsl");
  }
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
                        Ops, "vrsqrts");
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, usgn, "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
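    // E.g. storing lane 1 of a <2 x i64> is expected to emit roughly
    //   %tmp = shufflevector <2 x i64> %v, <2 x i64> %v, <1 x i32> <i32 1>
    //   call void @llvm.arm.neon.vst1.v1i64(i8* %p, <1 x i64> %tmp, i32 %align)
    // (an illustrative sketch; value names vary).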
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v: {
    llvm::VectorType *SrcTy =
      llvm::VectorType::getExtendedElementVectorType(VTy);

    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
Ops, "vtbx2"); 4717 case ARM::BI__builtin_neon_vtbx3_v: 4718 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), 4719 Ops, "vtbx3"); 4720 case ARM::BI__builtin_neon_vtbx4_v: 4721 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), 4722 Ops, "vtbx4"); 4723 case ARM::BI__builtin_neon_vtst_v: 4724 case ARM::BI__builtin_neon_vtstq_v: { 4725 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 4726 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 4727 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); 4728 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], 4729 ConstantAggregateZero::get(Ty)); 4730 return Builder.CreateSExt(Ops[0], Ty, "vtst"); 4731 } 4732 case ARM::BI__builtin_neon_vtrn_v: 4733 case ARM::BI__builtin_neon_vtrnq_v: { 4734 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 4735 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 4736 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 4737 Value *SV = 0; 4738 4739 for (unsigned vi = 0; vi != 2; ++vi) { 4740 SmallVector<Constant*, 16> Indices; 4741 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 4742 Indices.push_back(Builder.getInt32(i+vi)); 4743 Indices.push_back(Builder.getInt32(i+e+vi)); 4744 } 4745 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 4746 SV = llvm::ConstantVector::get(Indices); 4747 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); 4748 SV = Builder.CreateStore(SV, Addr); 4749 } 4750 return SV; 4751 } 4752 case ARM::BI__builtin_neon_vuzp_v: 4753 case ARM::BI__builtin_neon_vuzpq_v: { 4754 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 4755 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 4756 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 4757 Value *SV = 0; 4758 4759 for (unsigned vi = 0; vi != 2; ++vi) { 4760 SmallVector<Constant*, 16> Indices; 4761 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) 4762 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); 4763 4764 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 4765 SV = llvm::ConstantVector::get(Indices); 4766 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); 4767 SV = Builder.CreateStore(SV, Addr); 4768 } 4769 return SV; 4770 } 4771 case ARM::BI__builtin_neon_vzip_v: 4772 case ARM::BI__builtin_neon_vzipq_v: { 4773 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 4774 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 4775 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 4776 Value *SV = 0; 4777 4778 for (unsigned vi = 0; vi != 2; ++vi) { 4779 SmallVector<Constant*, 16> Indices; 4780 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 4781 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); 4782 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); 4783 } 4784 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 4785 SV = llvm::ConstantVector::get(Indices); 4786 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); 4787 SV = Builder.CreateStore(SV, Addr); 4788 } 4789 return SV; 4790 } 4791 } 4792} 4793 4794llvm::Value *CodeGenFunction:: 4795BuildVector(ArrayRef<llvm::Value*> Ops) { 4796 assert((Ops.size() & (Ops.size() - 1)) == 0 && 4797 "Not a power-of-two sized vector!"); 4798 bool AllConstants = true; 4799 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) 4800 AllConstants &= isa<Constant>(Ops[i]); 4801 4802 // If this is a constant vector, create a ConstantVector. 
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_ldmxcsr: {
    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, Int8PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    Value *Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // cast val to v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract element 0 (storelps) or 1 (storehps)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
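    // E.g. a hypothetical shiftVal of 2 selects bytes 2..9 of the
    // concatenated pair (Ops[1] supplies indices 0..7, Ops[0] indices 8..15).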
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is in bits.
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is in bits.
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
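    // E.g. a hypothetical shiftVal of 20 becomes a (20-16)*8 == 32-bit
    // logical right shift of each 128-bit lane.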
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is in bits.
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);

    // If the operand is an integer, we can't assume alignment. Otherwise,
    // assume natural alignment.
    QualType ArgTy = E->getArg(1)->getType();
    unsigned Align;
    if (ArgTy->isIntegerType())
      Align = 1;
    else
      Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
    SI->setAlignment(Align);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch(BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  // AVX2 broadcast
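  // The source of vbroadcastsi256 arrives in a register, but the
  // llvm.x86.avx2.vbroadcasti128 intrinsic takes a pointer, so the value is
  // spilled to a temporary and its address passed instead (see below).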
  case X86::BI__builtin_ia32_vbroadcastsi256: {
    Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], VecTmp);
    Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
    return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
  }
  }
}


Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}