CGBuiltin.cpp revision 0cd6bd62f3f7ee42f08ad130395ac65564768990
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
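// For illustration, a rough sketch of what these two helpers produce when
// the operand type T is a pointer (e.g. T = int* on a 64-bit target):
// EmitToInt emits a ptrtoint so the value can feed an integer atomic
// operation, and EmitFromInt emits the matching inttoptr to recover the
// original type (names invented):
//
//   %1 = ptrtoint i32* %val to i64       ; EmitToInt
//   ... integer atomic operation on %1 ...
//   %2 = inttoptr i64 %res to i32*       ; EmitFromInt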
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
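// A rough sketch of the difference between the two helpers above, for an
// i32 operand (illustrative IR, names invented):
//
//   __sync_fetch_and_add(&x, 1)   ; returns the *old* value
//     %old = atomicrmw add i32* %x, i32 1 seq_cst
//
//   __sync_add_and_fetch(&x, 1)   ; returns the *new* value
//     %old = atomicrmw add i32* %x, i32 1 seq_cst
//     %new = add i32 %old, 1      ; the extra CreateBinOp in ...Post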
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
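// Illustrative use of EmitOverflowIntrinsic (a sketch; X, Y and CGF are
// assumed to be in scope and of matching integer type):
//
//   llvm::Value *Carry;
//   llvm::Value *Sum =
//       EmitOverflowIntrinsic(CGF, llvm::Intrinsic::uadd_with_overflow,
//                             X, Y, Carry);
//   // Sum   <- extractvalue {i32, i1} %tmp, 0
//   // Carry <- extractvalue {i32, i1} %tmp, 1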
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
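  // Rough sketches of the direct intrinsic mappings above (illustrative
  // argument types, names invented):
  //   __builtin_expect(x, 0)  ->  %v = call i64 @llvm.expect.i64(i64 %x, i64 0)
  //   __builtin_bswap32(x)    ->  %v = call i32 @llvm.bswap.i32(i32 %x)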
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
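  // Illustrative lowering for __builtin_prefetch with the defaults filled in
  // (a sketch): __builtin_prefetch(p) becomes roughly
  //   call void @llvm.prefetch(i8* %p, i32 0 /*rw=read*/, i32 3 /*locality*/,
  //                            i32 1 /*data cache*/)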
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }
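  // Illustrative lowering for the block-copy builtins above (a sketch): for
  //   __builtin_memcpy(d, s, n)
  // with both pointers known to be 4-byte aligned, this emits roughly
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i32 4,
  //                                        i1 false)
  // where the alignment argument is the minimum of the two sides, as computed
  // by EmitPointerWithAlignment above.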
  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
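  // Example of the _chk folding above (a sketch): with constant arguments,
  //   __builtin___memset_chk(p, 0, 16, 32)
  // satisfies 16 <= 32 and is emitted as a plain @llvm.memset of 16 bytes,
  // while a non-constant or too-large size breaks out of the switch and
  // falls back to the normal library-call path at the bottom of this
  // function.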
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
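  // Illustrative mapping for the address-introspection builtins above
  // (a sketch):
  //   __builtin_return_address(0)  ->  call i8* @llvm.returnaddress(i32 0)
  //   __builtin_frame_address(0)   ->  call i8* @llvm.frameaddress(i32 0)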
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }
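  // A rough IR sketch for the compare-and-swap lowering above, assuming an
  // i32 operand (names invented):
  //   __sync_bool_compare_and_swap(&x, old, new):
  //     %prev = cmpxchg i32* %x, i32 %old, i32 %new seq_cst
  //     %ok   = icmp eq i32 %prev, %old
  //     %res  = zext i1 %ok to i32
  // The _val_ form instead returns %prev directly, converted back through
  // EmitFromInt.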
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = 0;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }
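  // Illustrative fence lowerings from the constant-order fast path above
  // (a sketch):
  //   __atomic_thread_fence(__ATOMIC_ACQUIRE)     -> fence acquire
  //   __c11_atomic_signal_fence(__ATOMIC_SEQ_CST) -> fence singlethread seq_cst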
  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
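  // Rough sketch of the signbit lowering above for a double argument
  // (illustrative IR, names invented):
  //   %bits = bitcast double %x to i64
  //   %sign = icmp slt i64 %bits, 0
  //   %res  = zext i1 %sign to i32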
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
1447 llvm::Value *X = EmitScalarExpr(E->getArg(0)); 1448 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); 1449 std::pair<llvm::Value *, unsigned> SumOutPtr = 1450 EmitPointerWithAlignment(E->getArg(2)); 1451 1452 // Decide which of the overflow intrinsics we are lowering to: 1453 llvm::Intrinsic::ID IntrinsicId; 1454 switch (BuiltinID) { 1455 default: llvm_unreachable("Unknown security overflow builtin id."); 1456 case Builtin::BI__builtin_uadd_overflow: 1457 case Builtin::BI__builtin_uaddl_overflow: 1458 case Builtin::BI__builtin_uaddll_overflow: 1459 IntrinsicId = llvm::Intrinsic::uadd_with_overflow; 1460 break; 1461 case Builtin::BI__builtin_usub_overflow: 1462 case Builtin::BI__builtin_usubl_overflow: 1463 case Builtin::BI__builtin_usubll_overflow: 1464 IntrinsicId = llvm::Intrinsic::usub_with_overflow; 1465 break; 1466 case Builtin::BI__builtin_umul_overflow: 1467 case Builtin::BI__builtin_umull_overflow: 1468 case Builtin::BI__builtin_umulll_overflow: 1469 IntrinsicId = llvm::Intrinsic::umul_with_overflow; 1470 break; 1471 case Builtin::BI__builtin_sadd_overflow: 1472 case Builtin::BI__builtin_saddl_overflow: 1473 case Builtin::BI__builtin_saddll_overflow: 1474 IntrinsicId = llvm::Intrinsic::sadd_with_overflow; 1475 break; 1476 case Builtin::BI__builtin_ssub_overflow: 1477 case Builtin::BI__builtin_ssubl_overflow: 1478 case Builtin::BI__builtin_ssubll_overflow: 1479 IntrinsicId = llvm::Intrinsic::ssub_with_overflow; 1480 break; 1481 case Builtin::BI__builtin_smul_overflow: 1482 case Builtin::BI__builtin_smull_overflow: 1483 case Builtin::BI__builtin_smulll_overflow: 1484 IntrinsicId = llvm::Intrinsic::smul_with_overflow; 1485 break; 1486 } 1487 1488 1489 llvm::Value *Carry; 1490 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); 1491 llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first); 1492 SumOutStore->setAlignment(SumOutPtr.second); 1493 1494 return RValue::get(Carry); 1495 } 1496 case Builtin::BI__builtin_addressof: 1497 return RValue::get(EmitLValue(E->getArg(0)).getAddress()); 1498 case Builtin::BI__noop: 1499 return RValue::get(0); 1500 } 1501 1502 // If this is an alias for a lib function (e.g. __builtin_sin), emit 1503 // the call using the normal call path, but using the unmangled 1504 // version of the function name. 1505 if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) 1506 return emitLibraryCall(*this, FD, E, 1507 CGM.getBuiltinLibFunction(FD, BuiltinID)); 1508 1509 // If this is a predefined lib function (e.g. malloc), emit the call 1510 // using exactly the normal call path. 1511 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) 1512 return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee())); 1513 1514 // See if we have a target specific intrinsic. 1515 const char *Name = getContext().BuiltinInfo.GetName(BuiltinID); 1516 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; 1517 if (const char *Prefix = 1518 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) 1519 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name); 1520 1521 if (IntrinsicID != Intrinsic::not_intrinsic) { 1522 SmallVector<Value*, 16> Args; 1523 1524 // Find out if any arguments are required to be integer constant 1525 // expressions. 
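  // (Note: ICEArguments is a bitmask filled in by GetBuiltinType; bit i set
  // means argument i of the builtin must be an integer constant expression,
  // so it is folded to a ConstantInt below rather than emitted as a runtime
  // value.)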
1526 unsigned ICEArguments = 0;
1527 ASTContext::GetBuiltinTypeError Error;
1528 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
1529 assert(Error == ASTContext::GE_None && "Should not codegen an error");
1530
1531 Function *F = CGM.getIntrinsic(IntrinsicID);
1532 llvm::FunctionType *FTy = F->getFunctionType();
1533
1534 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
1535 Value *ArgValue;
1536 // If this is a normal argument, just emit it as a scalar.
1537 if ((ICEArguments & (1 << i)) == 0) {
1538 ArgValue = EmitScalarExpr(E->getArg(i));
1539 } else {
1540 // If this is required to be a constant, constant fold it so that we
1541 // know that the generated intrinsic gets a ConstantInt.
1542 llvm::APSInt Result;
1543 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
1544 assert(IsConst && "Constant arg isn't actually constant?");
1545 (void)IsConst;
1546 ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
1547 }
1548
1549 // If the intrinsic arg type is different from the builtin arg type
1550 // we need to do a bit cast.
1551 llvm::Type *PTy = FTy->getParamType(i);
1552 if (PTy != ArgValue->getType()) {
1553 assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
1554 "Must be able to losslessly bit cast to param");
1555 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
1556 }
1557
1558 Args.push_back(ArgValue);
1559 }
1560
1561 Value *V = Builder.CreateCall(F, Args);
1562 QualType BuiltinRetType = E->getType();
1563
1564 llvm::Type *RetTy = VoidTy;
1565 if (!BuiltinRetType->isVoidType())
1566 RetTy = ConvertType(BuiltinRetType);
1567
1568 if (RetTy != V->getType()) {
1569 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
1570 "Must be able to losslessly bit cast result type");
1571 V = Builder.CreateBitCast(V, RetTy);
1572 }
1573
1574 return RValue::get(V);
1575 }
1576
1577 // See if we have a target specific builtin that needs to be lowered.
1578 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
1579 return RValue::get(V);
1580
1581 ErrorUnsupported(E, "builtin function");
1582
1583 // Unknown builtin, for now just dump it out and return undef.
1584 return GetUndefRValue(E->getType());
1585}
1586
1587Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
1588 const CallExpr *E) {
1589 switch (getTarget().getTriple().getArch()) {
1590 case llvm::Triple::aarch64:
1591 return EmitAArch64BuiltinExpr(BuiltinID, E);
1592 case llvm::Triple::arm:
1593 case llvm::Triple::thumb:
1594 return EmitARMBuiltinExpr(BuiltinID, E);
1595 case llvm::Triple::x86:
1596 case llvm::Triple::x86_64:
1597 return EmitX86BuiltinExpr(BuiltinID, E);
1598 case llvm::Triple::ppc:
1599 case llvm::Triple::ppc64:
1600 case llvm::Triple::ppc64le:
1601 return EmitPPCBuiltinExpr(BuiltinID, E);
1602 default:
1603 return 0;
1604 }
1605}
1606
1607static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
1608 NeonTypeFlags TypeFlags,
1609 bool V1Ty=false) {
1610 int IsQuad = TypeFlags.isQuad();
1611 switch (TypeFlags.getEltType()) {
1612 case NeonTypeFlags::Int8:
1613 case NeonTypeFlags::Poly8:
1614 return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
1615 case NeonTypeFlags::Int16:
1616 case NeonTypeFlags::Poly16:
1617 case NeonTypeFlags::Float16:
1618 return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
1619 case NeonTypeFlags::Int32:
1620 return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
1621 case NeonTypeFlags::Int64:
1622 return llvm::VectorType::get(CGF->Int64Ty, V1Ty ?
1 : (1 << IsQuad));
1623 case NeonTypeFlags::Float32:
1624 return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
1625 case NeonTypeFlags::Float64:
1626 return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
1627 }
1628 llvm_unreachable("Unknown vector element type!");
1629}
1630
1631Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
1632 unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
1633 Value *SV = llvm::ConstantVector::getSplat(nElts, C);
1634 return Builder.CreateShuffleVector(V, V, SV, "lane");
1635}
1636
1637Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
1638 const char *name,
1639 unsigned shift, bool rightshift) {
1640 unsigned j = 0;
1641 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
1642 ai != ae; ++ai, ++j)
1643 if (shift > 0 && shift == j)
1644 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
1645 else
1646 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
1647
1648 return Builder.CreateCall(F, Ops, name);
1649}
1650
1651Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
1652 bool neg) {
1653 int SV = cast<ConstantInt>(V)->getSExtValue();
1654
1655 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
1656 llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
1657 return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
1658}
1659
1660/// EmitPointerWithAlignment - Given an expression with a pointer type, emit
1661/// its value together with the alignment of the type referenced by the
1662/// pointer. Skip over implicit casts.
1663std::pair<llvm::Value*, unsigned>
1664CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
1665 assert(Addr->getType()->isPointerType());
1666 Addr = Addr->IgnoreParens();
1667 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
1668 if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
1669 ICE->getSubExpr()->getType()->isPointerType()) {
1670 std::pair<llvm::Value*, unsigned> Ptr =
1671 EmitPointerWithAlignment(ICE->getSubExpr());
1672 Ptr.first = Builder.CreateBitCast(Ptr.first,
1673 ConvertType(Addr->getType()));
1674 return Ptr;
1675 } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1676 LValue LV = EmitLValue(ICE->getSubExpr());
1677 unsigned Align = LV.getAlignment().getQuantity();
1678 if (!Align) {
1679 // FIXME: Once LValues are fixed to always set alignment,
1680 // zap this code.
1681 QualType PtTy = ICE->getSubExpr()->getType();
1682 if (!PtTy->isIncompleteType())
1683 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1684 else
1685 Align = 1;
1686 }
1687 return std::make_pair(LV.getAddress(), Align);
1688 }
1689 }
1690 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
1691 if (UO->getOpcode() == UO_AddrOf) {
1692 LValue LV = EmitLValue(UO->getSubExpr());
1693 unsigned Align = LV.getAlignment().getQuantity();
1694 if (!Align) {
1695 // FIXME: Once LValues are fixed to always set alignment,
1696 // zap this code.
1697 QualType PtTy = UO->getSubExpr()->getType();
1698 if (!PtTy->isIncompleteType())
1699 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1700 else
1701 Align = 1;
1702 }
1703 return std::make_pair(LV.getAddress(), Align);
1704 }
1705 }
1706
1707 unsigned Align = 1;
1708 QualType PtTy = Addr->getType()->getPointeeType();
1709 if (!PtTy->isIncompleteType())
1710 Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
1711
1712 return std::make_pair(EmitScalarExpr(Addr), Align);
1713}
1714
1715static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
1716 unsigned BuiltinID,
1717 const CallExpr *E) {
1718 NeonTypeFlags::EltType ET;
1719 bool usgn;
1720 unsigned int Int = 0;
1721 bool OverloadInt = true;
1722 const char *s = NULL;
1723
1724 SmallVector<Value *, 4> Ops;
1725 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
1726 Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
1727 }
1728
1729 // AArch64 scalar builtins are not overloaded; they lack the extra argument
1730 // that specifies the vector type, so each case must be handled separately.
1731 switch (BuiltinID) {
1732 default: break;
1733 // Scalar Add
1734 case AArch64::BI__builtin_neon_vaddd_s64:
1735 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vaddds;
1736 s = "vaddds"; usgn = false; OverloadInt = false; break;
1737 case AArch64::BI__builtin_neon_vaddd_u64:
1738 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vadddu;
1739 s = "vadddu"; usgn = true; OverloadInt = false; break;
1740 // Scalar Sub
1741 case AArch64::BI__builtin_neon_vsubd_s64:
1742 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vsubds;
1743 s = "vsubds"; usgn = false; OverloadInt = false; break;
1744 case AArch64::BI__builtin_neon_vsubd_u64:
1745 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vsubdu;
1746 s = "vsubdu"; usgn = true; OverloadInt = false; break;
1747 // Scalar Saturating Add
1748 case AArch64::BI__builtin_neon_vqaddb_s8:
1749 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqadds;
1750 s = "vqadds"; usgn = false; OverloadInt = true; break;
1751 case AArch64::BI__builtin_neon_vqaddh_s16:
1752 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqadds;
1753 s = "vqadds"; usgn = false; OverloadInt = true; break;
1754 case AArch64::BI__builtin_neon_vqadds_s32:
1755 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqadds;
1756 s = "vqadds"; usgn = false; OverloadInt = true; break;
1757 case AArch64::BI__builtin_neon_vqaddd_s64:
1758 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqadds;
1759 s = "vqadds"; usgn = false; OverloadInt = true; break;
1760 case AArch64::BI__builtin_neon_vqaddb_u8:
1761 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqaddu;
1762 s = "vqaddu"; usgn = true; OverloadInt = true; break;
1763 case AArch64::BI__builtin_neon_vqaddh_u16:
1764 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqaddu;
1765 s = "vqaddu"; usgn = true; OverloadInt = true; break;
1766 case AArch64::BI__builtin_neon_vqadds_u32:
1767 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqaddu;
1768 s = "vqaddu"; usgn = true; OverloadInt = true; break;
1769 case AArch64::BI__builtin_neon_vqaddd_u64:
1770 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqaddu;
1771 s = "vqaddu"; usgn = true; OverloadInt = true; break;
1772 // Scalar Saturating Sub
1773 case AArch64::BI__builtin_neon_vqsubb_s8:
1774 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqsubs;
1775 s = "vqsubs"; usgn = false; OverloadInt = true; break;
1776 case
AArch64::BI__builtin_neon_vqsubh_s16:
1777 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqsubs;
1778 s = "vqsubs"; usgn = false; OverloadInt = true; break;
1779 case AArch64::BI__builtin_neon_vqsubs_s32:
1780 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqsubs;
1781 s = "vqsubs"; usgn = false; OverloadInt = true; break;
1782 case AArch64::BI__builtin_neon_vqsubd_s64:
1783 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqsubs;
1784 s = "vqsubs"; usgn = false; OverloadInt = true; break;
1785 case AArch64::BI__builtin_neon_vqsubb_u8:
1786 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqsubu;
1787 s = "vqsubu"; usgn = true; OverloadInt = true; break;
1788 case AArch64::BI__builtin_neon_vqsubh_u16:
1789 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqsubu;
1790 s = "vqsubu"; usgn = true; OverloadInt = true; break;
1791 case AArch64::BI__builtin_neon_vqsubs_u32:
1792 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqsubu;
1793 s = "vqsubu"; usgn = true; OverloadInt = true; break;
1794 case AArch64::BI__builtin_neon_vqsubd_u64:
1795 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqsubu;
1796 s = "vqsubu"; usgn = true; OverloadInt = true; break;
1797 // Scalar Shift Left
1798 case AArch64::BI__builtin_neon_vshld_s64:
1799 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vshlds;
1800 s = "vshlds"; usgn = false; OverloadInt=false; break;
1801 case AArch64::BI__builtin_neon_vshld_u64:
1802 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vshldu;
1803 s = "vshldu"; usgn = true; OverloadInt = false; break;
1804 // Scalar Saturating Shift Left
1805 case AArch64::BI__builtin_neon_vqshlb_s8:
1806 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqshls;
1807 s = "vqshls"; usgn = false; OverloadInt = true; break;
1808 case AArch64::BI__builtin_neon_vqshlh_s16:
1809 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqshls;
1810 s = "vqshls"; usgn = false; OverloadInt = true; break;
1811 case AArch64::BI__builtin_neon_vqshls_s32:
1812 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqshls;
1813 s = "vqshls"; usgn = false; OverloadInt = true; break;
1814 case AArch64::BI__builtin_neon_vqshld_s64:
1815 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqshls;
1816 s = "vqshls"; usgn = false; OverloadInt = true; break;
1817 case AArch64::BI__builtin_neon_vqshlb_u8:
1818 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqshlu;
1819 s = "vqshlu"; usgn = true; OverloadInt = true; break;
1820 case AArch64::BI__builtin_neon_vqshlh_u16:
1821 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqshlu;
1822 s = "vqshlu"; usgn = true; OverloadInt = true; break;
1823 case AArch64::BI__builtin_neon_vqshls_u32:
1824 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqshlu;
1825 s = "vqshlu"; usgn = true; OverloadInt = true; break;
1826 case AArch64::BI__builtin_neon_vqshld_u64:
1827 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqshlu;
1828 s = "vqshlu"; usgn = true; OverloadInt = true; break;
1829 // Scalar Rounding Shift Left
1830 case AArch64::BI__builtin_neon_vrshld_s64:
1831 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vrshlds;
1832 s = "vrshlds"; usgn = false; OverloadInt=false; break;
1833 case AArch64::BI__builtin_neon_vrshld_u64:
1834 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vrshldu;
1835 s = "vrshldu"; usgn = true; OverloadInt=false; break;
1836 // Scalar Saturating Rounding Shift Left
1837 case AArch64::BI__builtin_neon_vqrshlb_s8:
1838 ET
= NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqrshls; 1839 s = "vqrshls"; usgn = false; OverloadInt = true; break; 1840 case AArch64::BI__builtin_neon_vqrshlh_s16: 1841 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqrshls; 1842 s = "vqrshls"; usgn = false; OverloadInt = true; break; 1843 case AArch64::BI__builtin_neon_vqrshls_s32: 1844 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqrshls; 1845 s = "vqrshls"; usgn = false; OverloadInt = true; break; 1846 case AArch64::BI__builtin_neon_vqrshld_s64: 1847 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqrshls; 1848 s = "vqrshls"; usgn = false; OverloadInt = true; break; 1849 case AArch64::BI__builtin_neon_vqrshlb_u8: 1850 ET = NeonTypeFlags::Int8; Int = Intrinsic::aarch64_neon_vqrshlu; 1851 s = "vqrshlu"; usgn = true; OverloadInt = true; break; 1852 case AArch64::BI__builtin_neon_vqrshlh_u16: 1853 ET = NeonTypeFlags::Int16; Int = Intrinsic::aarch64_neon_vqrshlu; 1854 s = "vqrshlu"; usgn = true; OverloadInt = true; break; 1855 case AArch64::BI__builtin_neon_vqrshls_u32: 1856 ET = NeonTypeFlags::Int32; Int = Intrinsic::aarch64_neon_vqrshlu; 1857 s = "vqrshlu"; usgn = true; OverloadInt = true; break; 1858 case AArch64::BI__builtin_neon_vqrshld_u64: 1859 ET = NeonTypeFlags::Int64; Int = Intrinsic::aarch64_neon_vqrshlu; 1860 s = "vqrshlu"; usgn = true; OverloadInt = true; break; 1861 // Scalar Reduce Pairwise Add 1862 case AArch64::BI__builtin_neon_vpaddd_s64: 1863 Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd"; 1864 OverloadInt = false; break; 1865 case AArch64::BI__builtin_neon_vpadds_f32: 1866 Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd"; 1867 OverloadInt = false; break; 1868 case AArch64::BI__builtin_neon_vpaddd_f64: 1869 Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq"; 1870 OverloadInt = false; break; 1871 // Scalar Reduce Pairwise Floating Point Max 1872 case AArch64::BI__builtin_neon_vpmaxs_f32: 1873 Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax"; 1874 OverloadInt = false; break; 1875 case AArch64::BI__builtin_neon_vpmaxqd_f64: 1876 Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq"; 1877 OverloadInt = false; break; 1878 // Scalar Reduce Pairwise Floating Point Min 1879 case AArch64::BI__builtin_neon_vpmins_f32: 1880 Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin"; 1881 OverloadInt = false; break; 1882 case AArch64::BI__builtin_neon_vpminqd_f64: 1883 Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq"; 1884 OverloadInt = false; break; 1885 // Scalar Reduce Pairwise Floating Point Maxnm 1886 case AArch64::BI__builtin_neon_vpmaxnms_f32: 1887 Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm"; 1888 OverloadInt = false; break; 1889 case AArch64::BI__builtin_neon_vpmaxnmqd_f64: 1890 Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq"; 1891 OverloadInt = false; break; 1892 // Scalar Reduce Pairwise Floating Point Minnm 1893 case AArch64::BI__builtin_neon_vpminnms_f32: 1894 Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm"; 1895 OverloadInt = false; break; 1896 case AArch64::BI__builtin_neon_vpminnmqd_f64: 1897 Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq"; 1898 OverloadInt = false; break; 1899 } 1900 1901 if (!Int) 1902 return 0; 1903 1904 // AArch64 scalar builtin that returns scalar type 1905 // and should be mapped to AArch64 intrinsic that takes 1906 // one-element vector type arguments and returns 1907 // one-element vector type. 
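// For example (a rough, hand-written sketch of the IR this path produces),
// vqaddb_s8 becomes an overloaded call on a one-element vector followed by
// a bitcast back to the scalar type:
//
//   %v = call <1 x i8> @llvm.aarch64.neon.vqadds.v1i8(<1 x i8> %a,
//                                                     <1 x i8> %b)
//   %r = bitcast <1 x i8> %v to i8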
1908 llvm::Type *Ty = 0; 1909 Function *F = 0; 1910 if (OverloadInt) { 1911 // Determine the type of this overloaded AArch64 intrinsic 1912 NeonTypeFlags Type(ET, usgn, false); 1913 llvm::VectorType *VTy = GetNeonType(&CGF, Type, true); 1914 Ty = VTy; 1915 if (!Ty) 1916 return 0; 1917 F = CGF.CGM.getIntrinsic(Int, Ty); 1918 } else 1919 F = CGF.CGM.getIntrinsic(Int); 1920 1921 Value *Result = CGF.EmitNeonCall(F, Ops, s); 1922 llvm::Type *ResultType = CGF.ConvertType(E->getType()); 1923 // AArch64 intrinsic one-element vector type cast to 1924 // scalar type expected by the builtin 1925 return CGF.Builder.CreateBitCast(Result, ResultType, s); 1926} 1927 1928Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, 1929 const CallExpr *E) { 1930 1931 // Process AArch64 scalar builtins 1932 if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E)) 1933 return Result; 1934 1935 if (BuiltinID == AArch64::BI__clear_cache) { 1936 assert(E->getNumArgs() == 2 && 1937 "Variadic __clear_cache slipped through on AArch64"); 1938 1939 const FunctionDecl *FD = E->getDirectCallee(); 1940 SmallVector<Value *, 2> Ops; 1941 for (unsigned i = 0; i < E->getNumArgs(); i++) 1942 Ops.push_back(EmitScalarExpr(E->getArg(i))); 1943 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); 1944 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); 1945 StringRef Name = FD->getName(); 1946 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); 1947 } 1948 1949 SmallVector<Value *, 4> Ops; 1950 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { 1951 Ops.push_back(EmitScalarExpr(E->getArg(i))); 1952 } 1953 1954 // Get the last argument, which specifies the vector type. 1955 llvm::APSInt Result; 1956 const Expr *Arg = E->getArg(E->getNumArgs() - 1); 1957 if (!Arg->isIntegerConstantExpr(Result, getContext())) 1958 return 0; 1959 1960 // Determine the type of this overloaded NEON intrinsic. 1961 NeonTypeFlags Type(Result.getZExtValue()); 1962 bool usgn = Type.isUnsigned(); 1963 1964 llvm::VectorType *VTy = GetNeonType(this, Type); 1965 llvm::Type *Ty = VTy; 1966 if (!Ty) 1967 return 0; 1968 1969 unsigned Int; 1970 switch (BuiltinID) { 1971 default: 1972 return 0; 1973 1974 // AArch64 builtins mapping to legacy ARM v7 builtins. 1975 // FIXME: the mapped builtins listed correspond to what has been tested 1976 // in aarch64-neon-intrinsics.c so far. 
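  // (These forwarding cases reuse the ARM NEON lowering unchanged: each one
  // simply re-emits the same call expression under the corresponding ARM
  // builtin ID.)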
1977 case AArch64::BI__builtin_neon_vmul_v: 1978 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E); 1979 case AArch64::BI__builtin_neon_vmulq_v: 1980 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E); 1981 case AArch64::BI__builtin_neon_vabd_v: 1982 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E); 1983 case AArch64::BI__builtin_neon_vabdq_v: 1984 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E); 1985 case AArch64::BI__builtin_neon_vfma_v: 1986 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E); 1987 case AArch64::BI__builtin_neon_vfmaq_v: 1988 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E); 1989 case AArch64::BI__builtin_neon_vbsl_v: 1990 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E); 1991 case AArch64::BI__builtin_neon_vbslq_v: 1992 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E); 1993 case AArch64::BI__builtin_neon_vrsqrts_v: 1994 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E); 1995 case AArch64::BI__builtin_neon_vrsqrtsq_v: 1996 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E); 1997 case AArch64::BI__builtin_neon_vrecps_v: 1998 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E); 1999 case AArch64::BI__builtin_neon_vrecpsq_v: 2000 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E); 2001 case AArch64::BI__builtin_neon_vcage_v: 2002 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E); 2003 case AArch64::BI__builtin_neon_vcale_v: 2004 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E); 2005 case AArch64::BI__builtin_neon_vcaleq_v: 2006 std::swap(Ops[0], Ops[1]); 2007 case AArch64::BI__builtin_neon_vcageq_v: { 2008 Function *F; 2009 if (VTy->getElementType()->isIntegerTy(64)) 2010 F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq); 2011 else 2012 F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq); 2013 return EmitNeonCall(F, Ops, "vcage"); 2014 } 2015 case AArch64::BI__builtin_neon_vcalt_v: 2016 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E); 2017 case AArch64::BI__builtin_neon_vcagt_v: 2018 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E); 2019 case AArch64::BI__builtin_neon_vcaltq_v: 2020 std::swap(Ops[0], Ops[1]); 2021 case AArch64::BI__builtin_neon_vcagtq_v: { 2022 Function *F; 2023 if (VTy->getElementType()->isIntegerTy(64)) 2024 F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq); 2025 else 2026 F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq); 2027 return EmitNeonCall(F, Ops, "vcagt"); 2028 } 2029 case AArch64::BI__builtin_neon_vtst_v: 2030 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E); 2031 case AArch64::BI__builtin_neon_vtstq_v: 2032 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E); 2033 case AArch64::BI__builtin_neon_vhadd_v: 2034 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E); 2035 case AArch64::BI__builtin_neon_vhaddq_v: 2036 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E); 2037 case AArch64::BI__builtin_neon_vhsub_v: 2038 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E); 2039 case AArch64::BI__builtin_neon_vhsubq_v: 2040 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E); 2041 case AArch64::BI__builtin_neon_vrhadd_v: 2042 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E); 2043 case AArch64::BI__builtin_neon_vrhaddq_v: 2044 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E); 2045 case AArch64::BI__builtin_neon_vqadd_v: 2046 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E); 2047 case 
AArch64::BI__builtin_neon_vqaddq_v: 2048 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E); 2049 case AArch64::BI__builtin_neon_vqsub_v: 2050 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E); 2051 case AArch64::BI__builtin_neon_vqsubq_v: 2052 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E); 2053 case AArch64::BI__builtin_neon_vshl_v: 2054 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E); 2055 case AArch64::BI__builtin_neon_vshlq_v: 2056 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E); 2057 case AArch64::BI__builtin_neon_vqshl_v: 2058 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E); 2059 case AArch64::BI__builtin_neon_vqshlq_v: 2060 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E); 2061 case AArch64::BI__builtin_neon_vrshl_v: 2062 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E); 2063 case AArch64::BI__builtin_neon_vrshlq_v: 2064 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E); 2065 case AArch64::BI__builtin_neon_vqrshl_v: 2066 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E); 2067 case AArch64::BI__builtin_neon_vqrshlq_v: 2068 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E); 2069 case AArch64::BI__builtin_neon_vaddhn_v: 2070 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E); 2071 case AArch64::BI__builtin_neon_vraddhn_v: 2072 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E); 2073 case AArch64::BI__builtin_neon_vsubhn_v: 2074 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E); 2075 case AArch64::BI__builtin_neon_vrsubhn_v: 2076 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E); 2077 case AArch64::BI__builtin_neon_vmull_v: 2078 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E); 2079 case AArch64::BI__builtin_neon_vqdmull_v: 2080 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E); 2081 case AArch64::BI__builtin_neon_vqdmlal_v: 2082 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E); 2083 case AArch64::BI__builtin_neon_vqdmlsl_v: 2084 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E); 2085 case AArch64::BI__builtin_neon_vmax_v: 2086 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E); 2087 case AArch64::BI__builtin_neon_vmaxq_v: 2088 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E); 2089 case AArch64::BI__builtin_neon_vmin_v: 2090 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E); 2091 case AArch64::BI__builtin_neon_vminq_v: 2092 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E); 2093 case AArch64::BI__builtin_neon_vpmax_v: 2094 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E); 2095 case AArch64::BI__builtin_neon_vpmin_v: 2096 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E); 2097 case AArch64::BI__builtin_neon_vpadd_v: 2098 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E); 2099 case AArch64::BI__builtin_neon_vqdmulh_v: 2100 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E); 2101 case AArch64::BI__builtin_neon_vqdmulhq_v: 2102 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E); 2103 case AArch64::BI__builtin_neon_vqrdmulh_v: 2104 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E); 2105 case AArch64::BI__builtin_neon_vqrdmulhq_v: 2106 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E); 2107 2108 // Shift by immediate 2109 case AArch64::BI__builtin_neon_vshr_n_v: 2110 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E); 2111 case 
AArch64::BI__builtin_neon_vshrq_n_v: 2112 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E); 2113 case AArch64::BI__builtin_neon_vrshr_n_v: 2114 case AArch64::BI__builtin_neon_vrshrq_n_v: 2115 Int = usgn ? Intrinsic::aarch64_neon_vurshr 2116 : Intrinsic::aarch64_neon_vsrshr; 2117 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n"); 2118 case AArch64::BI__builtin_neon_vsra_n_v: 2119 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E); 2120 case AArch64::BI__builtin_neon_vsraq_n_v: 2121 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E); 2122 case AArch64::BI__builtin_neon_vrsra_n_v: 2123 case AArch64::BI__builtin_neon_vrsraq_n_v: { 2124 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2125 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2126 Int = usgn ? Intrinsic::aarch64_neon_vurshr 2127 : Intrinsic::aarch64_neon_vsrshr; 2128 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]); 2129 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); 2130 } 2131 case AArch64::BI__builtin_neon_vshl_n_v: 2132 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E); 2133 case AArch64::BI__builtin_neon_vshlq_n_v: 2134 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E); 2135 case AArch64::BI__builtin_neon_vqshl_n_v: 2136 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E); 2137 case AArch64::BI__builtin_neon_vqshlq_n_v: 2138 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E); 2139 case AArch64::BI__builtin_neon_vqshlu_n_v: 2140 case AArch64::BI__builtin_neon_vqshluq_n_v: 2141 Int = Intrinsic::aarch64_neon_vsqshlu; 2142 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n"); 2143 case AArch64::BI__builtin_neon_vsri_n_v: 2144 case AArch64::BI__builtin_neon_vsriq_n_v: 2145 Int = Intrinsic::aarch64_neon_vsri; 2146 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n"); 2147 case AArch64::BI__builtin_neon_vsli_n_v: 2148 case AArch64::BI__builtin_neon_vsliq_n_v: 2149 Int = Intrinsic::aarch64_neon_vsli; 2150 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n"); 2151 case AArch64::BI__builtin_neon_vshll_n_v: { 2152 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy); 2153 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); 2154 if (usgn) 2155 Ops[0] = Builder.CreateZExt(Ops[0], VTy); 2156 else 2157 Ops[0] = Builder.CreateSExt(Ops[0], VTy); 2158 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); 2159 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); 2160 } 2161 case AArch64::BI__builtin_neon_vshrn_n_v: { 2162 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy); 2163 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); 2164 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); 2165 if (usgn) 2166 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); 2167 else 2168 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); 2169 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); 2170 } 2171 case AArch64::BI__builtin_neon_vqshrun_n_v: 2172 Int = Intrinsic::aarch64_neon_vsqshrun; 2173 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); 2174 case AArch64::BI__builtin_neon_vrshrn_n_v: 2175 Int = Intrinsic::aarch64_neon_vrshrn; 2176 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); 2177 case AArch64::BI__builtin_neon_vqrshrun_n_v: 2178 Int = Intrinsic::aarch64_neon_vsqrshrun; 2179 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); 2180 case AArch64::BI__builtin_neon_vqshrn_n_v: 2181 Int = usgn ? 
Intrinsic::aarch64_neon_vuqshrn 2182 : Intrinsic::aarch64_neon_vsqshrn; 2183 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); 2184 case AArch64::BI__builtin_neon_vqrshrn_n_v: 2185 Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn 2186 : Intrinsic::aarch64_neon_vsqrshrn; 2187 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); 2188 2189 // Convert 2190 case AArch64::BI__builtin_neon_vmovl_v: 2191 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E); 2192 case AArch64::BI__builtin_neon_vcvt_n_f32_v: 2193 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E); 2194 case AArch64::BI__builtin_neon_vcvtq_n_f32_v: 2195 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E); 2196 case AArch64::BI__builtin_neon_vcvtq_n_f64_v: { 2197 llvm::Type *FloatTy = 2198 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true)); 2199 llvm::Type *Tys[2] = { FloatTy, Ty }; 2200 Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp 2201 : Intrinsic::arm_neon_vcvtfxs2fp; 2202 Function *F = CGM.getIntrinsic(Int, Tys); 2203 return EmitNeonCall(F, Ops, "vcvt_n"); 2204 } 2205 case AArch64::BI__builtin_neon_vcvt_n_s32_v: 2206 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E); 2207 case AArch64::BI__builtin_neon_vcvtq_n_s32_v: 2208 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E); 2209 case AArch64::BI__builtin_neon_vcvt_n_u32_v: 2210 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E); 2211 case AArch64::BI__builtin_neon_vcvtq_n_u32_v: 2212 return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E); 2213 case AArch64::BI__builtin_neon_vcvtq_n_s64_v: 2214 case AArch64::BI__builtin_neon_vcvtq_n_u64_v: { 2215 llvm::Type *FloatTy = 2216 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true)); 2217 llvm::Type *Tys[2] = { Ty, FloatTy }; 2218 Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu 2219 : Intrinsic::arm_neon_vcvtfp2fxs; 2220 Function *F = CGM.getIntrinsic(Int, Tys); 2221 return EmitNeonCall(F, Ops, "vcvt_n"); 2222 } 2223 2224 // AArch64-only builtins 2225 case AArch64::BI__builtin_neon_vfms_v: 2226 case AArch64::BI__builtin_neon_vfmsq_v: { 2227 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); 2228 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2229 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2230 Ops[1] = Builder.CreateFNeg(Ops[1]); 2231 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 2232 2233 // LLVM's fma intrinsic puts the accumulator in the last position, but the 2234 // AArch64 intrinsic has it first. 2235 return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]); 2236 } 2237 case AArch64::BI__builtin_neon_vmaxnm_v: 2238 case AArch64::BI__builtin_neon_vmaxnmq_v: { 2239 Int = Intrinsic::aarch64_neon_vmaxnm; 2240 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); 2241 } 2242 case AArch64::BI__builtin_neon_vminnm_v: 2243 case AArch64::BI__builtin_neon_vminnmq_v: { 2244 Int = Intrinsic::aarch64_neon_vminnm; 2245 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); 2246 } 2247 case AArch64::BI__builtin_neon_vpmaxnm_v: 2248 case AArch64::BI__builtin_neon_vpmaxnmq_v: { 2249 Int = Intrinsic::aarch64_neon_vpmaxnm; 2250 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); 2251 } 2252 case AArch64::BI__builtin_neon_vpminnm_v: 2253 case AArch64::BI__builtin_neon_vpminnmq_v: { 2254 Int = Intrinsic::aarch64_neon_vpminnm; 2255 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); 2256 } 2257 case AArch64::BI__builtin_neon_vpmaxq_v: { 2258 Int = usgn ? 
Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs; 2259 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); 2260 } 2261 case AArch64::BI__builtin_neon_vpminq_v: { 2262 Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins; 2263 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); 2264 } 2265 case AArch64::BI__builtin_neon_vpaddq_v: { 2266 Int = Intrinsic::arm_neon_vpadd; 2267 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd"); 2268 } 2269 case AArch64::BI__builtin_neon_vmulx_v: 2270 case AArch64::BI__builtin_neon_vmulxq_v: { 2271 Int = Intrinsic::aarch64_neon_vmulx; 2272 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); 2273 } 2274 } 2275} 2276 2277Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, 2278 const CallExpr *E) { 2279 if (BuiltinID == ARM::BI__clear_cache) { 2280 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); 2281 const FunctionDecl *FD = E->getDirectCallee(); 2282 SmallVector<Value*, 2> Ops; 2283 for (unsigned i = 0; i < 2; i++) 2284 Ops.push_back(EmitScalarExpr(E->getArg(i))); 2285 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); 2286 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); 2287 StringRef Name = FD->getName(); 2288 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); 2289 } 2290 2291 if (BuiltinID == ARM::BI__builtin_arm_ldrexd || 2292 (BuiltinID == ARM::BI__builtin_arm_ldrex && 2293 getContext().getTypeSize(E->getType()) == 64)) { 2294 Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); 2295 2296 Value *LdPtr = EmitScalarExpr(E->getArg(0)); 2297 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), 2298 "ldrexd"); 2299 2300 Value *Val0 = Builder.CreateExtractValue(Val, 1); 2301 Value *Val1 = Builder.CreateExtractValue(Val, 0); 2302 Val0 = Builder.CreateZExt(Val0, Int64Ty); 2303 Val1 = Builder.CreateZExt(Val1, Int64Ty); 2304 2305 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); 2306 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); 2307 Val = Builder.CreateOr(Val, Val1); 2308 return Builder.CreateBitCast(Val, ConvertType(E->getType())); 2309 } 2310 2311 if (BuiltinID == ARM::BI__builtin_arm_ldrex) { 2312 Value *LoadAddr = EmitScalarExpr(E->getArg(0)); 2313 2314 QualType Ty = E->getType(); 2315 llvm::Type *RealResTy = ConvertType(Ty); 2316 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(), 2317 getContext().getTypeSize(Ty)); 2318 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo()); 2319 2320 Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType()); 2321 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); 2322 2323 if (RealResTy->isPointerTy()) 2324 return Builder.CreateIntToPtr(Val, RealResTy); 2325 else { 2326 Val = Builder.CreateTruncOrBitCast(Val, IntResTy); 2327 return Builder.CreateBitCast(Val, RealResTy); 2328 } 2329 } 2330 2331 if (BuiltinID == ARM::BI__builtin_arm_strexd || 2332 (BuiltinID == ARM::BI__builtin_arm_strex && 2333 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { 2334 Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd); 2335 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL); 2336 2337 Value *Tmp = CreateMemTemp(E->getArg(0)->getType()); 2338 Value *Val = EmitScalarExpr(E->getArg(0)); 2339 Builder.CreateStore(Val, Tmp); 2340 2341 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy)); 2342 Val = Builder.CreateLoad(LdPtr); 2343 2344 Value *Arg0 = Builder.CreateExtractValue(Val, 
0);
2345 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
2346 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
2347 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
2348 }
2349
2350 if (BuiltinID == ARM::BI__builtin_arm_strex) {
2351 Value *StoreVal = EmitScalarExpr(E->getArg(0));
2352 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
2353
2354 QualType Ty = E->getArg(0)->getType();
2355 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
2356 getContext().getTypeSize(Ty));
2357 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
2358
2359 if (StoreVal->getType()->isPointerTy())
2360 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
2361 else {
2362 StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
2363 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
2364 }
2365
2366 Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
2367 return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
2368 }
2369
2370 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
2371 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
2372 return Builder.CreateCall(F);
2373 }
2374
2375 // CRC32
2376 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
2377 switch (BuiltinID) {
2378 case ARM::BI__builtin_arm_crc32b:
2379 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
2380 case ARM::BI__builtin_arm_crc32cb:
2381 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
2382 case ARM::BI__builtin_arm_crc32h:
2383 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
2384 case ARM::BI__builtin_arm_crc32ch:
2385 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
2386 case ARM::BI__builtin_arm_crc32w:
2387 case ARM::BI__builtin_arm_crc32d:
2388 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
2389 case ARM::BI__builtin_arm_crc32cw:
2390 case ARM::BI__builtin_arm_crc32cd:
2391 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
2392 }
2393
2394 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
2395 Value *Arg0 = EmitScalarExpr(E->getArg(0));
2396 Value *Arg1 = EmitScalarExpr(E->getArg(1));
2397
2398 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
2399 // intrinsics, hence we need different codegen for these cases.
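    // A rough sketch of that split for crc32d (operand names illustrative):
    //   %s   = lshr i64 %b, 32
    //   %lo  = trunc i64 %b to i32
    //   %hi  = trunc i64 %s to i32
    //   %t   = call i32 @llvm.arm.crc32w(i32 %a, i32 %lo)
    //   %res = call i32 @llvm.arm.crc32w(i32 %t, i32 %hi)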
2400 if (BuiltinID == ARM::BI__builtin_arm_crc32d || 2401 BuiltinID == ARM::BI__builtin_arm_crc32cd) { 2402 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); 2403 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); 2404 Value *Arg1b = Builder.CreateLShr(Arg1, C1); 2405 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); 2406 2407 Function *F = CGM.getIntrinsic(CRCIntrinsicID); 2408 Value *Res = Builder.CreateCall2(F, Arg0, Arg1a); 2409 return Builder.CreateCall2(F, Res, Arg1b); 2410 } else { 2411 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); 2412 2413 Function *F = CGM.getIntrinsic(CRCIntrinsicID); 2414 return Builder.CreateCall2(F, Arg0, Arg1); 2415 } 2416 } 2417 2418 SmallVector<Value*, 4> Ops; 2419 llvm::Value *Align = 0; 2420 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { 2421 if (i == 0) { 2422 switch (BuiltinID) { 2423 case ARM::BI__builtin_neon_vld1_v: 2424 case ARM::BI__builtin_neon_vld1q_v: 2425 case ARM::BI__builtin_neon_vld1q_lane_v: 2426 case ARM::BI__builtin_neon_vld1_lane_v: 2427 case ARM::BI__builtin_neon_vld1_dup_v: 2428 case ARM::BI__builtin_neon_vld1q_dup_v: 2429 case ARM::BI__builtin_neon_vst1_v: 2430 case ARM::BI__builtin_neon_vst1q_v: 2431 case ARM::BI__builtin_neon_vst1q_lane_v: 2432 case ARM::BI__builtin_neon_vst1_lane_v: 2433 case ARM::BI__builtin_neon_vst2_v: 2434 case ARM::BI__builtin_neon_vst2q_v: 2435 case ARM::BI__builtin_neon_vst2_lane_v: 2436 case ARM::BI__builtin_neon_vst2q_lane_v: 2437 case ARM::BI__builtin_neon_vst3_v: 2438 case ARM::BI__builtin_neon_vst3q_v: 2439 case ARM::BI__builtin_neon_vst3_lane_v: 2440 case ARM::BI__builtin_neon_vst3q_lane_v: 2441 case ARM::BI__builtin_neon_vst4_v: 2442 case ARM::BI__builtin_neon_vst4q_v: 2443 case ARM::BI__builtin_neon_vst4_lane_v: 2444 case ARM::BI__builtin_neon_vst4q_lane_v: 2445 // Get the alignment for the argument in addition to the value; 2446 // we'll use it later. 2447 std::pair<llvm::Value*, unsigned> Src = 2448 EmitPointerWithAlignment(E->getArg(0)); 2449 Ops.push_back(Src.first); 2450 Align = Builder.getInt32(Src.second); 2451 continue; 2452 } 2453 } 2454 if (i == 1) { 2455 switch (BuiltinID) { 2456 case ARM::BI__builtin_neon_vld2_v: 2457 case ARM::BI__builtin_neon_vld2q_v: 2458 case ARM::BI__builtin_neon_vld3_v: 2459 case ARM::BI__builtin_neon_vld3q_v: 2460 case ARM::BI__builtin_neon_vld4_v: 2461 case ARM::BI__builtin_neon_vld4q_v: 2462 case ARM::BI__builtin_neon_vld2_lane_v: 2463 case ARM::BI__builtin_neon_vld2q_lane_v: 2464 case ARM::BI__builtin_neon_vld3_lane_v: 2465 case ARM::BI__builtin_neon_vld3q_lane_v: 2466 case ARM::BI__builtin_neon_vld4_lane_v: 2467 case ARM::BI__builtin_neon_vld4q_lane_v: 2468 case ARM::BI__builtin_neon_vld2_dup_v: 2469 case ARM::BI__builtin_neon_vld3_dup_v: 2470 case ARM::BI__builtin_neon_vld4_dup_v: 2471 // Get the alignment for the argument in addition to the value; 2472 // we'll use it later. 2473 std::pair<llvm::Value*, unsigned> Src = 2474 EmitPointerWithAlignment(E->getArg(1)); 2475 Ops.push_back(Src.first); 2476 Align = Builder.getInt32(Src.second); 2477 continue; 2478 } 2479 } 2480 Ops.push_back(EmitScalarExpr(E->getArg(i))); 2481 } 2482 2483 // vget_lane and vset_lane are not overloaded and do not have an extra 2484 // argument that specifies the vector type. 
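  // E.g. (a sketch) vget_lane_i32 on a <2 x i32> is a plain extractelement,
  // and vset_lane_i32 a plain insertelement:
  //   %r = extractelement <2 x i32> %v, i32 %lane
  //   %w = insertelement <2 x i32> %v, i32 %elt, i32 %lane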
2485 switch (BuiltinID) { 2486 default: break; 2487 case ARM::BI__builtin_neon_vget_lane_i8: 2488 case ARM::BI__builtin_neon_vget_lane_i16: 2489 case ARM::BI__builtin_neon_vget_lane_i32: 2490 case ARM::BI__builtin_neon_vget_lane_i64: 2491 case ARM::BI__builtin_neon_vget_lane_f32: 2492 case ARM::BI__builtin_neon_vgetq_lane_i8: 2493 case ARM::BI__builtin_neon_vgetq_lane_i16: 2494 case ARM::BI__builtin_neon_vgetq_lane_i32: 2495 case ARM::BI__builtin_neon_vgetq_lane_i64: 2496 case ARM::BI__builtin_neon_vgetq_lane_f32: 2497 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), 2498 "vget_lane"); 2499 case ARM::BI__builtin_neon_vset_lane_i8: 2500 case ARM::BI__builtin_neon_vset_lane_i16: 2501 case ARM::BI__builtin_neon_vset_lane_i32: 2502 case ARM::BI__builtin_neon_vset_lane_i64: 2503 case ARM::BI__builtin_neon_vset_lane_f32: 2504 case ARM::BI__builtin_neon_vsetq_lane_i8: 2505 case ARM::BI__builtin_neon_vsetq_lane_i16: 2506 case ARM::BI__builtin_neon_vsetq_lane_i32: 2507 case ARM::BI__builtin_neon_vsetq_lane_i64: 2508 case ARM::BI__builtin_neon_vsetq_lane_f32: 2509 Ops.push_back(EmitScalarExpr(E->getArg(2))); 2510 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); 2511 } 2512 2513 // Get the last argument, which specifies the vector type. 2514 llvm::APSInt Result; 2515 const Expr *Arg = E->getArg(E->getNumArgs()-1); 2516 if (!Arg->isIntegerConstantExpr(Result, getContext())) 2517 return 0; 2518 2519 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || 2520 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { 2521 // Determine the overloaded type of this builtin. 2522 llvm::Type *Ty; 2523 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) 2524 Ty = FloatTy; 2525 else 2526 Ty = DoubleTy; 2527 2528 // Determine whether this is an unsigned conversion or not. 2529 bool usgn = Result.getZExtValue() == 1; 2530 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; 2531 2532 // Call the appropriate intrinsic. 2533 Function *F = CGM.getIntrinsic(Int, Ty); 2534 return Builder.CreateCall(F, Ops, "vcvtr"); 2535 } 2536 2537 // Determine the type of this overloaded NEON intrinsic. 2538 NeonTypeFlags Type(Result.getZExtValue()); 2539 bool usgn = Type.isUnsigned(); 2540 bool quad = Type.isQuad(); 2541 bool rightShift = false; 2542 2543 llvm::VectorType *VTy = GetNeonType(this, Type); 2544 llvm::Type *Ty = VTy; 2545 if (!Ty) 2546 return 0; 2547 2548 unsigned Int; 2549 switch (BuiltinID) { 2550 default: return 0; 2551 case ARM::BI__builtin_neon_vbsl_v: 2552 case ARM::BI__builtin_neon_vbslq_v: 2553 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty), 2554 Ops, "vbsl"); 2555 case ARM::BI__builtin_neon_vabd_v: 2556 case ARM::BI__builtin_neon_vabdq_v: 2557 Int = usgn ? 
Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds; 2558 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); 2559 case ARM::BI__builtin_neon_vabs_v: 2560 case ARM::BI__builtin_neon_vabsq_v: 2561 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty), 2562 Ops, "vabs"); 2563 case ARM::BI__builtin_neon_vaddhn_v: { 2564 llvm::VectorType *SrcTy = 2565 llvm::VectorType::getExtendedElementVectorType(VTy); 2566 2567 // %sum = add <4 x i32> %lhs, %rhs 2568 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); 2569 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); 2570 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); 2571 2572 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> 2573 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(), 2574 SrcTy->getScalarSizeInBits() / 2); 2575 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt); 2576 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); 2577 2578 // %res = trunc <4 x i32> %high to <4 x i16> 2579 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); 2580 } 2581 case ARM::BI__builtin_neon_vcale_v: 2582 std::swap(Ops[0], Ops[1]); 2583 case ARM::BI__builtin_neon_vcage_v: { 2584 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged); 2585 return EmitNeonCall(F, Ops, "vcage"); 2586 } 2587 case ARM::BI__builtin_neon_vcaleq_v: 2588 std::swap(Ops[0], Ops[1]); 2589 case ARM::BI__builtin_neon_vcageq_v: { 2590 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq); 2591 return EmitNeonCall(F, Ops, "vcage"); 2592 } 2593 case ARM::BI__builtin_neon_vcalt_v: 2594 std::swap(Ops[0], Ops[1]); 2595 case ARM::BI__builtin_neon_vcagt_v: { 2596 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd); 2597 return EmitNeonCall(F, Ops, "vcagt"); 2598 } 2599 case ARM::BI__builtin_neon_vcaltq_v: 2600 std::swap(Ops[0], Ops[1]); 2601 case ARM::BI__builtin_neon_vcagtq_v: { 2602 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq); 2603 return EmitNeonCall(F, Ops, "vcagt"); 2604 } 2605 case ARM::BI__builtin_neon_vcls_v: 2606 case ARM::BI__builtin_neon_vclsq_v: { 2607 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty); 2608 return EmitNeonCall(F, Ops, "vcls"); 2609 } 2610 case ARM::BI__builtin_neon_vclz_v: 2611 case ARM::BI__builtin_neon_vclzq_v: { 2612 // Generate target-independent intrinsic; also need to add second argument 2613 // for whether or not clz of zero is undefined; on ARM it isn't. 
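    // E.g. for a <4 x i32> input this emits (the i1 flag is false because
    // ARM defines the result of clz on zero):
    //   %r = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)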
2614 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty); 2615 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); 2616 return EmitNeonCall(F, Ops, "vclz"); 2617 } 2618 case ARM::BI__builtin_neon_vcnt_v: 2619 case ARM::BI__builtin_neon_vcntq_v: { 2620 // generate target-independent intrinsic 2621 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty); 2622 return EmitNeonCall(F, Ops, "vctpop"); 2623 } 2624 case ARM::BI__builtin_neon_vcvt_f16_v: { 2625 assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad && 2626 "unexpected vcvt_f16_v builtin"); 2627 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf); 2628 return EmitNeonCall(F, Ops, "vcvt"); 2629 } 2630 case ARM::BI__builtin_neon_vcvt_f32_f16: { 2631 assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad && 2632 "unexpected vcvt_f32_f16 builtin"); 2633 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp); 2634 return EmitNeonCall(F, Ops, "vcvt"); 2635 } 2636 case ARM::BI__builtin_neon_vcvt_f32_v: 2637 case ARM::BI__builtin_neon_vcvtq_f32_v: 2638 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2639 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad)); 2640 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") 2641 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); 2642 case ARM::BI__builtin_neon_vcvt_s32_v: 2643 case ARM::BI__builtin_neon_vcvt_u32_v: 2644 case ARM::BI__builtin_neon_vcvtq_s32_v: 2645 case ARM::BI__builtin_neon_vcvtq_u32_v: { 2646 llvm::Type *FloatTy = 2647 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad)); 2648 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); 2649 return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") 2650 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); 2651 } 2652 case ARM::BI__builtin_neon_vcvt_n_f32_v: 2653 case ARM::BI__builtin_neon_vcvtq_n_f32_v: { 2654 llvm::Type *FloatTy = 2655 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad)); 2656 llvm::Type *Tys[2] = { FloatTy, Ty }; 2657 Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp 2658 : Intrinsic::arm_neon_vcvtfxs2fp; 2659 Function *F = CGM.getIntrinsic(Int, Tys); 2660 return EmitNeonCall(F, Ops, "vcvt_n"); 2661 } 2662 case ARM::BI__builtin_neon_vcvt_n_s32_v: 2663 case ARM::BI__builtin_neon_vcvt_n_u32_v: 2664 case ARM::BI__builtin_neon_vcvtq_n_s32_v: 2665 case ARM::BI__builtin_neon_vcvtq_n_u32_v: { 2666 llvm::Type *FloatTy = 2667 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad)); 2668 llvm::Type *Tys[2] = { Ty, FloatTy }; 2669 Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu 2670 : Intrinsic::arm_neon_vcvtfp2fxs; 2671 Function *F = CGM.getIntrinsic(Int, Tys); 2672 return EmitNeonCall(F, Ops, "vcvt_n"); 2673 } 2674 case ARM::BI__builtin_neon_vext_v: 2675 case ARM::BI__builtin_neon_vextq_v: { 2676 int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); 2677 SmallVector<Constant*, 16> Indices; 2678 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) 2679 Indices.push_back(ConstantInt::get(Int32Ty, i+CV)); 2680 2681 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2682 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2683 Value *SV = llvm::ConstantVector::get(Indices); 2684 return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext"); 2685 } 2686 case ARM::BI__builtin_neon_vhadd_v: 2687 case ARM::BI__builtin_neon_vhaddq_v: 2688 Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds; 2689 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd"); 2690 case ARM::BI__builtin_neon_vhsub_v: 2691 case ARM::BI__builtin_neon_vhsubq_v: 2692 Int = usgn ? 
Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs; 2693 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub"); 2694 case ARM::BI__builtin_neon_vld1_v: 2695 case ARM::BI__builtin_neon_vld1q_v: 2696 Ops.push_back(Align); 2697 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty), 2698 Ops, "vld1"); 2699 case ARM::BI__builtin_neon_vld1q_lane_v: 2700 // Handle 64-bit integer elements as a special case. Use shuffles of 2701 // one-element vectors to avoid poor code for i64 in the backend. 2702 if (VTy->getElementType()->isIntegerTy(64)) { 2703 // Extract the other lane. 2704 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2705 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); 2706 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); 2707 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); 2708 // Load the value as a one-element vector. 2709 Ty = llvm::VectorType::get(VTy->getElementType(), 1); 2710 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty); 2711 Value *Ld = Builder.CreateCall2(F, Ops[0], Align); 2712 // Combine them. 2713 SmallVector<Constant*, 2> Indices; 2714 Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane)); 2715 Indices.push_back(ConstantInt::get(Int32Ty, Lane)); 2716 SV = llvm::ConstantVector::get(Indices); 2717 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane"); 2718 } 2719 // fall through 2720 case ARM::BI__builtin_neon_vld1_lane_v: { 2721 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2722 Ty = llvm::PointerType::getUnqual(VTy->getElementType()); 2723 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2724 LoadInst *Ld = Builder.CreateLoad(Ops[0]); 2725 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue()); 2726 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); 2727 } 2728 case ARM::BI__builtin_neon_vld1_dup_v: 2729 case ARM::BI__builtin_neon_vld1q_dup_v: { 2730 Value *V = UndefValue::get(Ty); 2731 Ty = llvm::PointerType::getUnqual(VTy->getElementType()); 2732 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2733 LoadInst *Ld = Builder.CreateLoad(Ops[0]); 2734 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue()); 2735 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); 2736 Ops[0] = Builder.CreateInsertElement(V, Ld, CI); 2737 return EmitNeonSplat(Ops[0], CI); 2738 } 2739 case ARM::BI__builtin_neon_vld2_v: 2740 case ARM::BI__builtin_neon_vld2q_v: { 2741 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty); 2742 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2"); 2743 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 2744 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2745 return Builder.CreateStore(Ops[1], Ops[0]); 2746 } 2747 case ARM::BI__builtin_neon_vld3_v: 2748 case ARM::BI__builtin_neon_vld3q_v: { 2749 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty); 2750 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3"); 2751 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 2752 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2753 return Builder.CreateStore(Ops[1], Ops[0]); 2754 } 2755 case ARM::BI__builtin_neon_vld4_v: 2756 case ARM::BI__builtin_neon_vld4q_v: { 2757 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty); 2758 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4"); 2759 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 2760 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 2761 return Builder.CreateStore(Ops[1], Ops[0]); 2762 } 2763 case ARM::BI__builtin_neon_vld2_lane_v: 2764 case ARM::BI__builtin_neon_vld2q_lane_v: { 2765 Function *F = 
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
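  // vldN_dup loads one element per vector and replicates it into every lane.
  // There is no dedicated intrinsic for the general case, so the code below
  // reuses the vldNlane intrinsics to load lane 0 of N undef vectors and then
  // splats lane 0 across each result vector via EmitNeonSplat.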
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special-case.  There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // Splat lane 0 to all elements in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
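  // vmovl only widens each element, so it is emitted as plain IR.
  // Illustrative example: vmovl.s16 on <4 x i16> is just
  //   %res = sext <4 x i16> %a to <4 x i32>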
  case ARM::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement this leads to bad CodeGen. So we need an
    // intrinsic for now.
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case ARM::BI__builtin_neon_vfma_v:
  case ARM::BI__builtin_neon_vfmaq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
  }
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
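  // vqdmlal/vqdmlsl have no single intrinsic here; they are decomposed into a
  // saturating doubling multiply-long (vqdmull) followed by a saturating
  // add/sub against the accumulator, matching the instructions' semantics.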
  case ARM::BI__builtin_neon_vqdmlal_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlal");

    SmallVector<Value *, 2> AddOps;
    AddOps.push_back(Ops[0]);
    AddOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
                        AddOps, "vqdmlal");
  }
  case ARM::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                              MulOps, "vqdmlsl");

    SmallVector<Value *, 2> SubOps;
    SubOps.push_back(Ops[0]);
    SubOps.push_back(Mul);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
                        SubOps, "vqdmlsl");
  }
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
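  // For the shift-by-immediate forms below, the trailing EmitNeonCall
  // arguments give the index of the immediate operand and whether it is a
  // right shift; right-shift immediates are negated (see EmitNeonShiftVector)
  // because the underlying NEON shift intrinsics encode right shifts as
  // negative shift counts.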
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
                        Ops, "vrsqrts");
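  // vrsra_n (rounding shift right and accumulate) also has no one intrinsic:
  // the rounding shift goes through the vrshift intrinsic with a negated
  // immediate, and the accumulate is an ordinary IR add.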
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    if (usgn)
      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
    else
      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    // fall through
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
    if (usgn)
      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
    else
      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
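  // vsubhn (subtract and narrow, returning the high half) is open-coded in
  // IR: subtract in the wide type, shift each element's high half down, then
  // truncate, as the step-by-step IR comments below illustrate.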
  case ARM::BI__builtin_neon_vsubhn_v: {
    llvm::VectorType *SrcTy =
        llvm::VectorType::getExtendedElementVectorType(VTy);

    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
                                          SrcTy->getScalarSizeInBits() / 2);
    ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
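  // vtst (test bits) is likewise pure IR: AND the inputs, compare against
  // zero, and sign-extend the i1 lanes so each lane becomes all-ones or
  // all-zeros.  Illustrative IR for <8 x i8> operands:
  //   %and = and <8 x i8> %a, %b
  //   %cmp = icmp ne <8 x i8> %and, zeroinitializer
  //   %res = sext <8 x i1> %cmp to <8 x i8>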
sized vector!"); 3280 bool AllConstants = true; 3281 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) 3282 AllConstants &= isa<Constant>(Ops[i]); 3283 3284 // If this is a constant vector, create a ConstantVector. 3285 if (AllConstants) { 3286 SmallVector<llvm::Constant*, 16> CstOps; 3287 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3288 CstOps.push_back(cast<Constant>(Ops[i])); 3289 return llvm::ConstantVector::get(CstOps); 3290 } 3291 3292 // Otherwise, insertelement the values to build the vector. 3293 Value *Result = 3294 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); 3295 3296 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3297 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); 3298 3299 return Result; 3300} 3301 3302Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, 3303 const CallExpr *E) { 3304 SmallVector<Value*, 4> Ops; 3305 3306 // Find out if any arguments are required to be integer constant expressions. 3307 unsigned ICEArguments = 0; 3308 ASTContext::GetBuiltinTypeError Error; 3309 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); 3310 assert(Error == ASTContext::GE_None && "Should not codegen an error"); 3311 3312 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { 3313 // If this is a normal argument, just emit it as a scalar. 3314 if ((ICEArguments & (1 << i)) == 0) { 3315 Ops.push_back(EmitScalarExpr(E->getArg(i))); 3316 continue; 3317 } 3318 3319 // If this is required to be a constant, constant fold it so that we know 3320 // that the generated intrinsic gets a ConstantInt. 3321 llvm::APSInt Result; 3322 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); 3323 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; 3324 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); 3325 } 3326 3327 switch (BuiltinID) { 3328 default: return 0; 3329 case X86::BI__builtin_ia32_vec_init_v8qi: 3330 case X86::BI__builtin_ia32_vec_init_v4hi: 3331 case X86::BI__builtin_ia32_vec_init_v2si: 3332 return Builder.CreateBitCast(BuildVector(Ops), 3333 llvm::Type::getX86_MMXTy(getLLVMContext())); 3334 case X86::BI__builtin_ia32_vec_ext_v2si: 3335 return Builder.CreateExtractElement(Ops[0], 3336 llvm::ConstantInt::get(Ops[1]->getType(), 0)); 3337 case X86::BI__builtin_ia32_ldmxcsr: { 3338 Value *Tmp = CreateMemTemp(E->getArg(0)->getType()); 3339 Builder.CreateStore(Ops[0], Tmp); 3340 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), 3341 Builder.CreateBitCast(Tmp, Int8PtrTy)); 3342 } 3343 case X86::BI__builtin_ia32_stmxcsr: { 3344 Value *Tmp = CreateMemTemp(E->getType()); 3345 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), 3346 Builder.CreateBitCast(Tmp, Int8PtrTy)); 3347 return Builder.CreateLoad(Tmp, "stmxcsr"); 3348 } 3349 case X86::BI__builtin_ia32_storehps: 3350 case X86::BI__builtin_ia32_storelps: { 3351 llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); 3352 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); 3353 3354 // cast val v2i64 3355 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); 3356 3357 // extract (0, 1) 3358 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                 llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_ldmxcsr: {
    Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, Int8PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    Value *Tmp = CreateMemTemp(E->getType());
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, Int8PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // Cast the value to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
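  // palignr concatenates its two inputs and extracts a byte-aligned window.
  // Each palignr case below covers three regimes: a shift small enough to be
  // a byte shuffle, a larger shift lowered to a logical right shift of one
  // operand, and an out-of-range shift that yields zero.  Illustrative
  // example: the 8-byte (MMX) form with shiftVal == 3 selects bytes 3..10 of
  // the 16-byte concatenation.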
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // Shift right by the bit count computed above.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // Shift right by the i32 bit count computed above.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // Shift right by the i32 bit count computed above.
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
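  // The movnt* builtins become ordinary stores tagged with !nontemporal
  // metadata, which tells the backend to select a streaming (non-temporal)
  // store instead of a regular one.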
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti:
  case X86::BI__builtin_ia32_movnti64: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);

    // If the operand is an integer, we can't assume alignment.  Otherwise,
    // assume natural alignment.
    QualType ArgTy = E->getArg(1)->getType();
    unsigned Align;
    if (ArgTy->isIntegerType())
      Align = 1;
    else
      Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
    SI->setAlignment(Align);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch(BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
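  // The rdrand/rdseed "step" builtins wrap intrinsics returning a
  // { value, i32 success } pair: the random value is stored through the
  // pointer argument and the success flag becomes the builtin's result.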
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  // AVX2 broadcast
  case X86::BI__builtin_ia32_vbroadcastsi256: {
    Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
    Builder.CreateStore(Ops[0], VecTmp);
    Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
    return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    // Fold the offset and base-pointer arguments into a single address.
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    // Likewise fold the offset into the store address.
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}