CGBuiltin.cpp revision 5c22ad2ef6bf39da22d5190025e0ddfd4b568b2a
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
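///
/// For example, a call such as __sync_fetch_and_add(&x, 1) on an i32
/// object lowers to a single sequentially-consistent read-modify-write:
///   %old = atomicrmw add i32* %x, i32 1 seq_cst
/// and the pre-operation value %old is what the builtin returns.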
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
    CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
    CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: assert(0 && "Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
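  // For example, for a double argument this builds the LLVM function type
  // "double (double)" and binds it to the runtime symbol "fabs".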
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.Builder.CreateCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result =
      Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
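    // (Types 0/1 request the maximum remaining object size and types 2/3
    // the minimum, so only bit 1 of the constant matters; it becomes the
    // i1 "minimum" flag of llvm.objectsize below.)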
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined)
      EmitBranch(getTrapBB());
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
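    // (The fcmp above produces an i1; these builtins are specified to
    // return int, hence the zero-extension.)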
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral =
      EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
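    // (The fourth argument is the destination object size the compiler
    // derived via __builtin_object_size; when both it and the length are
    // constants and the length fits, the runtime check can never fire, so
    // the unchecked libc call is emitted instead.)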
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);

    return RValue::get(Address);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    llvm::Type *ElLLVMTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }

  case Builtin::BI__builtin_llvm_memory_barrier: {
    Value *C[5] = {
      EmitScalarExpr(E->getArg(0)),
      EmitScalarExpr(E->getArg(1)),
      EmitScalarExpr(E->getArg(2)),
      EmitScalarExpr(E->getArg(3)),
      EmitScalarExpr(E->getArg(4))
    };
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // TODO: there is currently no set of optimizer flags
    // sufficient for us to rewrite sqrt to @llvm.sqrt.
    // -fmath-errno=0 is not good enough; we need finiteness.
    // We could probably precondition the call with an ult
    // against 0, but is that worth the complexity?
    break;
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2)),
                                           "tmp"));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially casted, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
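  // The arch prefix ("x86", "arm", "ppc", ...) scopes the lookup so that,
  // for example, a __builtin_ia32_* name can resolve to the matching
  // llvm.x86.* intrinsic through the GCC-builtin name tables.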
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateMemTemp(E->getType()));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
  switch (type) {
  default: break;
  case 0:
  case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
  case 6:
  case 7:
  case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C), 4 << (int)q);
  case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C), 2 << (int)q);
  case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C), 1 << (int)q);
  case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C), 2 << (int)q);
  }
  return 0;
}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  SmallVector<Constant*, 16> Indices(nElts, C);
  Value *SV = llvm::ConstantVector::get(Indices);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  ConstantInt *CI = cast<ConstantInt>(V);
  int SV = CI->getSExtValue();

  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
  return llvm::ConstantVector::get(CV);
}

/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
  unsigned Align = 1;
  // Check if the type is a pointer.  The implicit cast operand might not be.
  while (Addr->getType()->isPointerType()) {
    QualType PtTy = Addr->getType()->getPointeeType();
    unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
    if (NewA > Align)
      Align = NewA;

    // If the address is an implicit cast, repeat with the cast operand.
    if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
      Addr = CastAddr->getSubExpr();
      continue;
    }
    break;
  }
  return llvm::ConstantInt::get(CGF.Int32Ty, Align);
}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    const FunctionDecl *FD = E->getDirectCallee();
    // Oddly people write this call without args on occasion and gcc accepts
    // it - it's also marked as varargs in the description file.
    SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    return Builder.CreateOr(Val, Val1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_strexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp,
                                         llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }

  SmallVector<Value*, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  // vget_lane and vset_lane are not overloaded and do not have an extra
  // argument that specifies the vector type.
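  // For these, the vector type is fixed by the builtin's signature, so
  // e.g. vgetq_lane_i32(v, 1) lowers directly to an extractelement of
  // lane 1, with no trailing type-discriminator argument to decode.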
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  }

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = llvm::Type::getFloatTy(getLLVMContext());
    else
      Ty = llvm::Type::getDoubleTy(getLLVMContext());

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  unsigned type = Result.getZExtValue();
  bool usgn = type & 0x08;
  bool quad = type & 0x10;
  bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
  (void)poly;  // Only used in assert()s.
  bool rightShift = false;

  llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ?
      Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
                        Ops, "vaddhn");
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
    return EmitNeonCall(F, Ops, "vcnt");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(getLLVMContext(), 4, quad);
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0],
                                   GetNeonType(getLLVMContext(), 4, quad));
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
    Int = usgn ?
      Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1_lane_v:
  case ARM::BI__builtin_neon_vld1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
1485 case ARM::BI__builtin_neon_vld4q_v: { 1486 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty); 1487 Value *Align = GetPointeeAlignment(*this, E->getArg(1)); 1488 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4"); 1489 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1490 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1491 return Builder.CreateStore(Ops[1], Ops[0]); 1492 } 1493 case ARM::BI__builtin_neon_vld2_lane_v: 1494 case ARM::BI__builtin_neon_vld2q_lane_v: { 1495 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty); 1496 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1497 Ops[3] = Builder.CreateBitCast(Ops[3], Ty); 1498 Ops.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1499 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); 1500 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1501 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1502 return Builder.CreateStore(Ops[1], Ops[0]); 1503 } 1504 case ARM::BI__builtin_neon_vld3_lane_v: 1505 case ARM::BI__builtin_neon_vld3q_lane_v: { 1506 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty); 1507 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1508 Ops[3] = Builder.CreateBitCast(Ops[3], Ty); 1509 Ops[4] = Builder.CreateBitCast(Ops[4], Ty); 1510 Ops.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1511 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); 1512 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1513 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1514 return Builder.CreateStore(Ops[1], Ops[0]); 1515 } 1516 case ARM::BI__builtin_neon_vld4_lane_v: 1517 case ARM::BI__builtin_neon_vld4q_lane_v: { 1518 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty); 1519 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1520 Ops[3] = Builder.CreateBitCast(Ops[3], Ty); 1521 Ops[4] = Builder.CreateBitCast(Ops[4], Ty); 1522 Ops[5] = Builder.CreateBitCast(Ops[5], Ty); 1523 Ops.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1524 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); 1525 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1526 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1527 return Builder.CreateStore(Ops[1], Ops[0]); 1528 } 1529 case ARM::BI__builtin_neon_vld2_dup_v: 1530 case ARM::BI__builtin_neon_vld3_dup_v: 1531 case ARM::BI__builtin_neon_vld4_dup_v: { 1532 // Handle 64-bit elements as a special-case. There is no "dup" needed.
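// A 64-bit lane already fills an entire D register, so a "dup" load of i64
// data degenerates to a plain vldN. An illustrative IR sketch (assuming
// <1 x i64> elements for vld3_dup; value names are made up):
//   %v = call { <1 x i64>, <1 x i64>, <1 x i64> }
//        @llvm.arm.neon.vld3.v1i64(i8* %p, i32 %align)
// No follow-up lane splat is emitted on this path.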
1533 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) { 1534 switch (BuiltinID) { 1535 case ARM::BI__builtin_neon_vld2_dup_v: 1536 Int = Intrinsic::arm_neon_vld2; 1537 break; 1538 case ARM::BI__builtin_neon_vld3_dup_v: 1539 Int = Intrinsic::arm_neon_vld3; 1540 break; 1541 case ARM::BI__builtin_neon_vld4_dup_v: 1542 Int = Intrinsic::arm_neon_vld4; 1543 break; 1544 default: assert(0 && "unknown vld_dup intrinsic?"); 1545 } 1546 Function *F = CGM.getIntrinsic(Int, Ty); 1547 Value *Align = GetPointeeAlignment(*this, E->getArg(1)); 1548 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup"); 1549 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1550 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1551 return Builder.CreateStore(Ops[1], Ops[0]); 1552 } 1553 switch (BuiltinID) { 1554 case ARM::BI__builtin_neon_vld2_dup_v: 1555 Int = Intrinsic::arm_neon_vld2lane; 1556 break; 1557 case ARM::BI__builtin_neon_vld3_dup_v: 1558 Int = Intrinsic::arm_neon_vld3lane; 1559 break; 1560 case ARM::BI__builtin_neon_vld4_dup_v: 1561 Int = Intrinsic::arm_neon_vld4lane; 1562 break; 1563 default: assert(0 && "unknown vld_dup intrinsic?"); 1564 } 1565 Function *F = CGM.getIntrinsic(Int, Ty); 1566 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType()); 1567 1568 SmallVector<Value*, 6> Args; 1569 Args.push_back(Ops[1]); 1570 Args.append(STy->getNumElements(), UndefValue::get(Ty)); 1571 1572 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); 1573 Args.push_back(CI); 1574 Args.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1575 1576 Ops[1] = Builder.CreateCall(F, Args, "vld_dup"); 1577 // Splat lane 0 to all elements in each vector of the result. 1578 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1579 Value *Val = Builder.CreateExtractValue(Ops[1], i); 1580 Value *Elt = Builder.CreateBitCast(Val, Ty); 1581 Elt = EmitNeonSplat(Elt, CI); 1582 Elt = Builder.CreateBitCast(Elt, Val->getType()); 1583 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i); 1584 } 1585 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1586 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1587 return Builder.CreateStore(Ops[1], Ops[0]); 1588 } 1589 case ARM::BI__builtin_neon_vmax_v: 1590 case ARM::BI__builtin_neon_vmaxq_v: 1591 Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs; 1592 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); 1593 case ARM::BI__builtin_neon_vmin_v: 1594 case ARM::BI__builtin_neon_vminq_v: 1595 Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins; 1596 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); 1597 case ARM::BI__builtin_neon_vmovl_v: { 1598 llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy); 1599 Ops[0] = Builder.CreateBitCast(Ops[0], DTy); 1600 if (usgn) 1601 return Builder.CreateZExt(Ops[0], Ty, "vmovl"); 1602 return Builder.CreateSExt(Ops[0], Ty, "vmovl"); 1603 } 1604 case ARM::BI__builtin_neon_vmovn_v: { 1605 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy); 1606 Ops[0] = Builder.CreateBitCast(Ops[0], QTy); 1607 return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); 1608 } 1609 case ARM::BI__builtin_neon_vmul_v: 1610 case ARM::BI__builtin_neon_vmulq_v: 1611 assert(poly && "vmul builtin only supported for polynomial types"); 1612 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty), 1613 Ops, "vmul"); 1614 case ARM::BI__builtin_neon_vmull_v: 1615 Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; 1616 Int = poly ? 
(unsigned)Intrinsic::arm_neon_vmullp : Int; 1617 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); 1618 case ARM::BI__builtin_neon_vpadal_v: 1619 case ARM::BI__builtin_neon_vpadalq_v: { 1620 Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals; 1621 // The source operand type has twice as many elements of half the size. 1622 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); 1623 llvm::Type *EltTy = 1624 llvm::IntegerType::get(getLLVMContext(), EltBits / 2); 1625 llvm::Type *NarrowTy = 1626 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); 1627 llvm::Type *Tys[2] = { Ty, NarrowTy }; 1628 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal"); 1629 } 1630 case ARM::BI__builtin_neon_vpadd_v: 1631 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty), 1632 Ops, "vpadd"); 1633 case ARM::BI__builtin_neon_vpaddl_v: 1634 case ARM::BI__builtin_neon_vpaddlq_v: { 1635 Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls; 1636 // The source operand type has twice as many elements of half the size. 1637 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); 1638 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); 1639 llvm::Type *NarrowTy = 1640 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); 1641 llvm::Type *Tys[2] = { Ty, NarrowTy }; 1642 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); 1643 } 1644 case ARM::BI__builtin_neon_vpmax_v: 1645 Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs; 1646 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); 1647 case ARM::BI__builtin_neon_vpmin_v: 1648 Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins; 1649 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); 1650 case ARM::BI__builtin_neon_vqabs_v: 1651 case ARM::BI__builtin_neon_vqabsq_v: 1652 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty), 1653 Ops, "vqabs"); 1654 case ARM::BI__builtin_neon_vqadd_v: 1655 case ARM::BI__builtin_neon_vqaddq_v: 1656 Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds; 1657 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd"); 1658 case ARM::BI__builtin_neon_vqdmlal_v: 1659 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty), 1660 Ops, "vqdmlal"); 1661 case ARM::BI__builtin_neon_vqdmlsl_v: 1662 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty), 1663 Ops, "vqdmlsl"); 1664 case ARM::BI__builtin_neon_vqdmulh_v: 1665 case ARM::BI__builtin_neon_vqdmulhq_v: 1666 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty), 1667 Ops, "vqdmulh"); 1668 case ARM::BI__builtin_neon_vqdmull_v: 1669 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty), 1670 Ops, "vqdmull"); 1671 case ARM::BI__builtin_neon_vqmovn_v: 1672 Int = usgn ? 
Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns; 1673 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn"); 1674 case ARM::BI__builtin_neon_vqmovun_v: 1675 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty), 1676 Ops, "vqmovun"); 1677 case ARM::BI__builtin_neon_vqneg_v: 1678 case ARM::BI__builtin_neon_vqnegq_v: 1679 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty), 1680 Ops, "vqneg"); 1681 case ARM::BI__builtin_neon_vqrdmulh_v: 1682 case ARM::BI__builtin_neon_vqrdmulhq_v: 1683 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty), 1684 Ops, "vqrdmulh"); 1685 case ARM::BI__builtin_neon_vqrshl_v: 1686 case ARM::BI__builtin_neon_vqrshlq_v: 1687 Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts; 1688 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl"); 1689 case ARM::BI__builtin_neon_vqrshrn_n_v: 1690 Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; 1691 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", 1692 1, true); 1693 case ARM::BI__builtin_neon_vqrshrun_n_v: 1694 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), 1695 Ops, "vqrshrun_n", 1, true); 1696 case ARM::BI__builtin_neon_vqshl_v: 1697 case ARM::BI__builtin_neon_vqshlq_v: 1698 Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; 1699 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl"); 1700 case ARM::BI__builtin_neon_vqshl_n_v: 1701 case ARM::BI__builtin_neon_vqshlq_n_v: 1702 Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; 1703 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", 1704 1, false); 1705 case ARM::BI__builtin_neon_vqshlu_n_v: 1706 case ARM::BI__builtin_neon_vqshluq_n_v: 1707 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty), 1708 Ops, "vqshlu", 1, false); 1709 case ARM::BI__builtin_neon_vqshrn_n_v: 1710 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; 1711 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", 1712 1, true); 1713 case ARM::BI__builtin_neon_vqshrun_n_v: 1714 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), 1715 Ops, "vqshrun_n", 1, true); 1716 case ARM::BI__builtin_neon_vqsub_v: 1717 case ARM::BI__builtin_neon_vqsubq_v: 1718 Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs; 1719 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub"); 1720 case ARM::BI__builtin_neon_vraddhn_v: 1721 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty), 1722 Ops, "vraddhn"); 1723 case ARM::BI__builtin_neon_vrecpe_v: 1724 case ARM::BI__builtin_neon_vrecpeq_v: 1725 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), 1726 Ops, "vrecpe"); 1727 case ARM::BI__builtin_neon_vrecps_v: 1728 case ARM::BI__builtin_neon_vrecpsq_v: 1729 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty), 1730 Ops, "vrecps"); 1731 case ARM::BI__builtin_neon_vrhadd_v: 1732 case ARM::BI__builtin_neon_vrhaddq_v: 1733 Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds; 1734 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd"); 1735 case ARM::BI__builtin_neon_vrshl_v: 1736 case ARM::BI__builtin_neon_vrshlq_v: 1737 Int = usgn ? 
Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1738 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl"); 1739 case ARM::BI__builtin_neon_vrshrn_n_v: 1740 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), 1741 Ops, "vrshrn_n", 1, true); 1742 case ARM::BI__builtin_neon_vrshr_n_v: 1743 case ARM::BI__builtin_neon_vrshrq_n_v: 1744 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1745 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true); 1746 case ARM::BI__builtin_neon_vrsqrte_v: 1747 case ARM::BI__builtin_neon_vrsqrteq_v: 1748 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty), 1749 Ops, "vrsqrte"); 1750 case ARM::BI__builtin_neon_vrsqrts_v: 1751 case ARM::BI__builtin_neon_vrsqrtsq_v: 1752 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty), 1753 Ops, "vrsqrts"); 1754 case ARM::BI__builtin_neon_vrsra_n_v: 1755 case ARM::BI__builtin_neon_vrsraq_n_v: 1756 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1757 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1758 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); 1759 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1760 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]); 1761 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); 1762 case ARM::BI__builtin_neon_vrsubhn_v: 1763 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty), 1764 Ops, "vrsubhn"); 1765 case ARM::BI__builtin_neon_vshl_v: 1766 case ARM::BI__builtin_neon_vshlq_v: 1767 Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts; 1768 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl"); 1769 case ARM::BI__builtin_neon_vshll_n_v: 1770 Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls; 1771 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1); 1772 case ARM::BI__builtin_neon_vshl_n_v: 1773 case ARM::BI__builtin_neon_vshlq_n_v: 1774 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); 1775 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n"); 1776 case ARM::BI__builtin_neon_vshrn_n_v: 1777 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty), 1778 Ops, "vshrn_n", 1, true); 1779 case ARM::BI__builtin_neon_vshr_n_v: 1780 case ARM::BI__builtin_neon_vshrq_n_v: 1781 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1782 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); 1783 if (usgn) 1784 return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n"); 1785 else 1786 return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n"); 1787 case ARM::BI__builtin_neon_vsri_n_v: 1788 case ARM::BI__builtin_neon_vsriq_n_v: 1789 rightShift = true; 1790 case ARM::BI__builtin_neon_vsli_n_v: 1791 case ARM::BI__builtin_neon_vsliq_n_v: 1792 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); 1793 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), 1794 Ops, "vsli_n"); 1795 case ARM::BI__builtin_neon_vsra_n_v: 1796 case ARM::BI__builtin_neon_vsraq_n_v: 1797 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1798 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1799 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false); 1800 if (usgn) 1801 Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n"); 1802 else 1803 Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n"); 1804 return Builder.CreateAdd(Ops[0], Ops[1]); 1805 case ARM::BI__builtin_neon_vst1_v: 1806 case ARM::BI__builtin_neon_vst1q_v: 1807 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1808 return 
EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty), 1809 Ops, ""); 1810 case ARM::BI__builtin_neon_vst1_lane_v: 1811 case ARM::BI__builtin_neon_vst1q_lane_v: 1812 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1813 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); 1814 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1815 return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty)); 1816 case ARM::BI__builtin_neon_vst2_v: 1817 case ARM::BI__builtin_neon_vst2q_v: 1818 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1819 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty), 1820 Ops, ""); 1821 case ARM::BI__builtin_neon_vst2_lane_v: 1822 case ARM::BI__builtin_neon_vst2q_lane_v: 1823 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1824 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty), 1825 Ops, ""); 1826 case ARM::BI__builtin_neon_vst3_v: 1827 case ARM::BI__builtin_neon_vst3q_v: 1828 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1829 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty), 1830 Ops, ""); 1831 case ARM::BI__builtin_neon_vst3_lane_v: 1832 case ARM::BI__builtin_neon_vst3q_lane_v: 1833 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1834 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty), 1835 Ops, ""); 1836 case ARM::BI__builtin_neon_vst4_v: 1837 case ARM::BI__builtin_neon_vst4q_v: 1838 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1839 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty), 1840 Ops, ""); 1841 case ARM::BI__builtin_neon_vst4_lane_v: 1842 case ARM::BI__builtin_neon_vst4q_lane_v: 1843 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1844 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty), 1845 Ops, ""); 1846 case ARM::BI__builtin_neon_vsubhn_v: 1847 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty), 1848 Ops, "vsubhn"); 1849 case ARM::BI__builtin_neon_vtbl1_v: 1850 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), 1851 Ops, "vtbl1"); 1852 case ARM::BI__builtin_neon_vtbl2_v: 1853 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), 1854 Ops, "vtbl2"); 1855 case ARM::BI__builtin_neon_vtbl3_v: 1856 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), 1857 Ops, "vtbl3"); 1858 case ARM::BI__builtin_neon_vtbl4_v: 1859 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), 1860 Ops, "vtbl4"); 1861 case ARM::BI__builtin_neon_vtbx1_v: 1862 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), 1863 Ops, "vtbx1"); 1864 case ARM::BI__builtin_neon_vtbx2_v: 1865 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), 1866 Ops, "vtbx2"); 1867 case ARM::BI__builtin_neon_vtbx3_v: 1868 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), 1869 Ops, "vtbx3"); 1870 case ARM::BI__builtin_neon_vtbx4_v: 1871 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), 1872 Ops, "vtbx4"); 1873 case ARM::BI__builtin_neon_vtst_v: 1874 case ARM::BI__builtin_neon_vtstq_v: { 1875 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1876 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1877 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); 1878 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], 1879 ConstantAggregateZero::get(Ty)); 1880 return Builder.CreateSExt(Ops[0], Ty, "vtst"); 1881 } 1882 case ARM::BI__builtin_neon_vtrn_v: 1883 case ARM::BI__builtin_neon_vtrnq_v: { 1884 Ops[0] = Builder.CreateBitCast(Ops[0], 
llvm::PointerType::getUnqual(Ty)); 1885 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1886 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1887 Value *SV = 0; 1888 1889 for (unsigned vi = 0; vi != 2; ++vi) { 1890 SmallVector<Constant*, 16> Indices; 1891 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 1892 Indices.push_back(ConstantInt::get(Int32Ty, i+vi)); 1893 Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi)); 1894 } 1895 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1896 SV = llvm::ConstantVector::get(Indices); 1897 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); 1898 SV = Builder.CreateStore(SV, Addr); 1899 } 1900 return SV; 1901 } 1902 case ARM::BI__builtin_neon_vuzp_v: 1903 case ARM::BI__builtin_neon_vuzpq_v: { 1904 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 1905 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1906 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1907 Value *SV = 0; 1908 1909 for (unsigned vi = 0; vi != 2; ++vi) { 1910 SmallVector<Constant*, 16> Indices; 1911 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) 1912 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); 1913 1914 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1915 SV = llvm::ConstantVector::get(Indices); 1916 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); 1917 SV = Builder.CreateStore(SV, Addr); 1918 } 1919 return SV; 1920 } 1921 case ARM::BI__builtin_neon_vzip_v: 1922 case ARM::BI__builtin_neon_vzipq_v: { 1923 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 1924 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1925 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1926 Value *SV = 0; 1927 1928 for (unsigned vi = 0; vi != 2; ++vi) { 1929 SmallVector<Constant*, 16> Indices; 1930 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 1931 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); 1932 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); 1933 } 1934 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1935 SV = llvm::ConstantVector::get(Indices); 1936 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); 1937 SV = Builder.CreateStore(SV, Addr); 1938 } 1939 return SV; 1940 } 1941 } 1942} 1943 1944llvm::Value *CodeGenFunction:: 1945BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) { 1946 assert((Ops.size() & (Ops.size() - 1)) == 0 && 1947 "Not a power-of-two sized vector!"); 1948 bool AllConstants = true; 1949 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) 1950 AllConstants &= isa<Constant>(Ops[i]); 1951 1952 // If this is a constant vector, create a ConstantVector. 1953 if (AllConstants) { 1954 std::vector<llvm::Constant*> CstOps; 1955 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1956 CstOps.push_back(cast<Constant>(Ops[i])); 1957 return llvm::ConstantVector::get(CstOps); 1958 } 1959 1960 // Otherwise, insertelement the values to build the vector. 1961 Value *Result = 1962 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); 1963 1964 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1965 Result = Builder.CreateInsertElement(Result, Ops[i], 1966 llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i)); 1967 1968 return Result; 1969} 1970 1971Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, 1972 const CallExpr *E) { 1973 SmallVector<Value*, 4> Ops; 1974 1975 // Find out if any arguments are required to be integer constant expressions. 
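// ICEArguments is a bitmask: bit i is set when argument i of the builtin must
// be an integer constant expression. Illustrative example (hypothetical
// builtin, not from this file): for a signature like (v4si, v4si, const int),
// GetBuiltinType would set ICEArguments to 0x4, so only E->getArg(2) takes
// the constant-folding path in the loop below.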
1976 unsigned ICEArguments = 0; 1977 ASTContext::GetBuiltinTypeError Error; 1978 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); 1979 assert(Error == ASTContext::GE_None && "Should not codegen an error"); 1980 1981 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { 1982 // If this is a normal argument, just emit it as a scalar. 1983 if ((ICEArguments & (1 << i)) == 0) { 1984 Ops.push_back(EmitScalarExpr(E->getArg(i))); 1985 continue; 1986 } 1987 1988 // If this is required to be a constant, constant fold it so that we know 1989 // that the generated intrinsic gets a ConstantInt. 1990 llvm::APSInt Result; 1991 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); 1992 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; 1993 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); 1994 } 1995 1996 switch (BuiltinID) { 1997 default: return 0; 1998 case X86::BI__builtin_ia32_pslldi128: 1999 case X86::BI__builtin_ia32_psllqi128: 2000 case X86::BI__builtin_ia32_psllwi128: 2001 case X86::BI__builtin_ia32_psradi128: 2002 case X86::BI__builtin_ia32_psrawi128: 2003 case X86::BI__builtin_ia32_psrldi128: 2004 case X86::BI__builtin_ia32_psrlqi128: 2005 case X86::BI__builtin_ia32_psrlwi128: { 2006 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext"); 2007 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); 2008 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 2009 Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty), 2010 Ops[1], Zero, "insert"); 2011 Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast"); 2012 const char *name = 0; 2013 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2014 2015 switch (BuiltinID) { 2016 default: assert(0 && "Unsupported shift intrinsic!"); 2017 case X86::BI__builtin_ia32_pslldi128: 2018 name = "pslldi"; 2019 ID = Intrinsic::x86_sse2_psll_d; 2020 break; 2021 case X86::BI__builtin_ia32_psllqi128: 2022 name = "psllqi"; 2023 ID = Intrinsic::x86_sse2_psll_q; 2024 break; 2025 case X86::BI__builtin_ia32_psllwi128: 2026 name = "psllwi"; 2027 ID = Intrinsic::x86_sse2_psll_w; 2028 break; 2029 case X86::BI__builtin_ia32_psradi128: 2030 name = "psradi"; 2031 ID = Intrinsic::x86_sse2_psra_d; 2032 break; 2033 case X86::BI__builtin_ia32_psrawi128: 2034 name = "psrawi"; 2035 ID = Intrinsic::x86_sse2_psra_w; 2036 break; 2037 case X86::BI__builtin_ia32_psrldi128: 2038 name = "psrldi"; 2039 ID = Intrinsic::x86_sse2_psrl_d; 2040 break; 2041 case X86::BI__builtin_ia32_psrlqi128: 2042 name = "psrlqi"; 2043 ID = Intrinsic::x86_sse2_psrl_q; 2044 break; 2045 case X86::BI__builtin_ia32_psrlwi128: 2046 name = "psrlwi"; 2047 ID = Intrinsic::x86_sse2_psrl_w; 2048 break; 2049 } 2050 llvm::Function *F = CGM.getIntrinsic(ID); 2051 return Builder.CreateCall(F, Ops, name); 2052 } 2053 case X86::BI__builtin_ia32_vec_init_v8qi: 2054 case X86::BI__builtin_ia32_vec_init_v4hi: 2055 case X86::BI__builtin_ia32_vec_init_v2si: 2056 return Builder.CreateBitCast(BuildVector(Ops), 2057 llvm::Type::getX86_MMXTy(getLLVMContext())); 2058 case X86::BI__builtin_ia32_vec_ext_v2si: 2059 return Builder.CreateExtractElement(Ops[0], 2060 llvm::ConstantInt::get(Ops[1]->getType(), 0)); 2061 case X86::BI__builtin_ia32_pslldi: 2062 case X86::BI__builtin_ia32_psllqi: 2063 case X86::BI__builtin_ia32_psllwi: 2064 case X86::BI__builtin_ia32_psradi: 2065 case X86::BI__builtin_ia32_psrawi: 2066 case X86::BI__builtin_ia32_psrldi: 2067 case X86::BI__builtin_ia32_psrlqi: 2068 case X86::BI__builtin_ia32_psrlwi: { 2069 Ops[1] = 
Builder.CreateZExt(Ops[1], Int64Ty, "zext"); 2070 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1); 2071 Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast"); 2072 const char *name = 0; 2073 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2074 2075 switch (BuiltinID) { 2076 default: assert(0 && "Unsupported shift intrinsic!"); 2077 case X86::BI__builtin_ia32_pslldi: 2078 name = "pslldi"; 2079 ID = Intrinsic::x86_mmx_psll_d; 2080 break; 2081 case X86::BI__builtin_ia32_psllqi: 2082 name = "psllqi"; 2083 ID = Intrinsic::x86_mmx_psll_q; 2084 break; 2085 case X86::BI__builtin_ia32_psllwi: 2086 name = "psllwi"; 2087 ID = Intrinsic::x86_mmx_psll_w; 2088 break; 2089 case X86::BI__builtin_ia32_psradi: 2090 name = "psradi"; 2091 ID = Intrinsic::x86_mmx_psra_d; 2092 break; 2093 case X86::BI__builtin_ia32_psrawi: 2094 name = "psrawi"; 2095 ID = Intrinsic::x86_mmx_psra_w; 2096 break; 2097 case X86::BI__builtin_ia32_psrldi: 2098 name = "psrldi"; 2099 ID = Intrinsic::x86_mmx_psrl_d; 2100 break; 2101 case X86::BI__builtin_ia32_psrlqi: 2102 name = "psrlqi"; 2103 ID = Intrinsic::x86_mmx_psrl_q; 2104 break; 2105 case X86::BI__builtin_ia32_psrlwi: 2106 name = "psrlwi"; 2107 ID = Intrinsic::x86_mmx_psrl_w; 2108 break; 2109 } 2110 llvm::Function *F = CGM.getIntrinsic(ID); 2111 return Builder.CreateCall(F, Ops, name); 2112 } 2113 case X86::BI__builtin_ia32_cmpps: { 2114 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps); 2115 return Builder.CreateCall(F, Ops, "cmpps"); 2116 } 2117 case X86::BI__builtin_ia32_cmpss: { 2118 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss); 2119 return Builder.CreateCall(F, Ops, "cmpss"); 2120 } 2121 case X86::BI__builtin_ia32_ldmxcsr: { 2122 llvm::Type *PtrTy = Int8PtrTy; 2123 Value *One = llvm::ConstantInt::get(Int32Ty, 1); 2124 Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); 2125 Builder.CreateStore(Ops[0], Tmp); 2126 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), 2127 Builder.CreateBitCast(Tmp, PtrTy)); 2128 } 2129 case X86::BI__builtin_ia32_stmxcsr: { 2130 llvm::Type *PtrTy = Int8PtrTy; 2131 Value *One = llvm::ConstantInt::get(Int32Ty, 1); 2132 Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); 2133 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), 2134 Builder.CreateBitCast(Tmp, PtrTy)); 2135 return Builder.CreateLoad(Tmp, "stmxcsr"); 2136 } 2137 case X86::BI__builtin_ia32_cmppd: { 2138 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd); 2139 return Builder.CreateCall(F, Ops, "cmppd"); 2140 } 2141 case X86::BI__builtin_ia32_cmpsd: { 2142 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd); 2143 return Builder.CreateCall(F, Ops, "cmpsd"); 2144 } 2145 case X86::BI__builtin_ia32_storehps: 2146 case X86::BI__builtin_ia32_storelps: { 2147 llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); 2148 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); 2149 2150 // cast val v2i64 2151 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); 2152 2153 // extract (0, 1) 2154 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 
0 : 1; 2155 llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index); 2156 Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract"); 2157 2158 // cast pointer to i64 & store 2159 Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); 2160 return Builder.CreateStore(Ops[1], Ops[0]); 2161 } 2162 case X86::BI__builtin_ia32_palignr: { 2163 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); 2164 2165 // If palignr is shifting the pair of input vectors less than 9 bytes, 2166 // emit a shuffle instruction. 2167 if (shiftVal <= 8) { 2168 SmallVector<llvm::Constant*, 8> Indices; 2169 for (unsigned i = 0; i != 8; ++i) 2170 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); 2171 2172 Value* SV = llvm::ConstantVector::get(Indices); 2173 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); 2174 } 2175 2176 // If palignr is shifting the pair of input vectors more than 8 but less 2177 // than 16 bytes, emit a logical right shift of the destination. 2178 if (shiftVal < 16) { 2179 // MMX has these as 1 x i64 vectors for some odd optimization reasons. 2180 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1); 2181 2182 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); 2183 Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8); 2184 2185 // create i32 constant 2186 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q); 2187 return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr"); 2188 } 2189 2190 // If palignr is shifting the pair of vectors more than 16 bytes, emit zero. 2191 return llvm::Constant::getNullValue(ConvertType(E->getType())); 2192 } 2193 case X86::BI__builtin_ia32_palignr128: { 2194 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); 2195 2196 // If palignr is shifting the pair of input vectors less than 17 bytes, 2197 // emit a shuffle instruction. 2198 if (shiftVal <= 16) { 2199 SmallVector<llvm::Constant*, 16> Indices; 2200 for (unsigned i = 0; i != 16; ++i) 2201 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); 2202 2203 Value* SV = llvm::ConstantVector::get(Indices); 2204 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); 2205 } 2206 2207 // If palignr is shifting the pair of input vectors more than 16 but less 2208 // than 32 bytes, emit a logical right shift of the destination. 2209 if (shiftVal < 32) { 2210 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); 2211 2212 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); 2213 Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8); 2214 2215 // create i32 constant 2216 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq); 2217 return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr"); 2218 } 2219 2220 // If palignr is shifting the pair of vectors more than 32 bytes, emit zero. 2221 return llvm::Constant::getNullValue(ConvertType(E->getType())); 2222 } 2223 case X86::BI__builtin_ia32_movntps: 2224 case X86::BI__builtin_ia32_movntpd: 2225 case X86::BI__builtin_ia32_movntdq: 2226 case X86::BI__builtin_ia32_movnti: { 2227 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), 2228 Builder.getInt32(1)); 2229 2230 // Convert the type of the pointer to a pointer to the stored type. 2231 Value *BC = Builder.CreateBitCast(Ops[0], 2232 llvm::PointerType::getUnqual(Ops[1]->getType()), 2233 "cast"); 2234 StoreInst *SI = Builder.CreateStore(Ops[1], BC); 2235 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 2236 SI->setAlignment(16); 2237 return SI; 2238 } 2239 // 3DNow! 
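// Each 3DNow! builtin maps one-to-one onto an x86_3dnow/x86_3dnowa
// intrinsic; the switch below only pairs the builtin ID with the intrinsic ID
// and a name for the result value, then emits a direct call. Illustrative IR
// sketch for __builtin_ia32_pfadd (value names are made up):
//   %pfadd = call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %a, x86_mmx %b)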
2240 case X86::BI__builtin_ia32_pavgusb: 2241 case X86::BI__builtin_ia32_pf2id: 2242 case X86::BI__builtin_ia32_pfacc: 2243 case X86::BI__builtin_ia32_pfadd: 2244 case X86::BI__builtin_ia32_pfcmpeq: 2245 case X86::BI__builtin_ia32_pfcmpge: 2246 case X86::BI__builtin_ia32_pfcmpgt: 2247 case X86::BI__builtin_ia32_pfmax: 2248 case X86::BI__builtin_ia32_pfmin: 2249 case X86::BI__builtin_ia32_pfmul: 2250 case X86::BI__builtin_ia32_pfrcp: 2251 case X86::BI__builtin_ia32_pfrcpit1: 2252 case X86::BI__builtin_ia32_pfrcpit2: 2253 case X86::BI__builtin_ia32_pfrsqrt: 2254 case X86::BI__builtin_ia32_pfrsqit1: 2255 case X86::BI__builtin_ia32_pfrsqrtit1: 2256 case X86::BI__builtin_ia32_pfsub: 2257 case X86::BI__builtin_ia32_pfsubr: 2258 case X86::BI__builtin_ia32_pi2fd: 2259 case X86::BI__builtin_ia32_pmulhrw: 2260 case X86::BI__builtin_ia32_pf2iw: 2261 case X86::BI__builtin_ia32_pfnacc: 2262 case X86::BI__builtin_ia32_pfpnacc: 2263 case X86::BI__builtin_ia32_pi2fw: 2264 case X86::BI__builtin_ia32_pswapdsf: 2265 case X86::BI__builtin_ia32_pswapdsi: { 2266 const char *name = 0; 2267 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2268 switch(BuiltinID) { 2269 case X86::BI__builtin_ia32_pavgusb: 2270 name = "pavgusb"; 2271 ID = Intrinsic::x86_3dnow_pavgusb; 2272 break; 2273 case X86::BI__builtin_ia32_pf2id: 2274 name = "pf2id"; 2275 ID = Intrinsic::x86_3dnow_pf2id; 2276 break; 2277 case X86::BI__builtin_ia32_pfacc: 2278 name = "pfacc"; 2279 ID = Intrinsic::x86_3dnow_pfacc; 2280 break; 2281 case X86::BI__builtin_ia32_pfadd: 2282 name = "pfadd"; 2283 ID = Intrinsic::x86_3dnow_pfadd; 2284 break; 2285 case X86::BI__builtin_ia32_pfcmpeq: 2286 name = "pfcmpeq"; 2287 ID = Intrinsic::x86_3dnow_pfcmpeq; 2288 break; 2289 case X86::BI__builtin_ia32_pfcmpge: 2290 name = "pfcmpge"; 2291 ID = Intrinsic::x86_3dnow_pfcmpge; 2292 break; 2293 case X86::BI__builtin_ia32_pfcmpgt: 2294 name = "pfcmpgt"; 2295 ID = Intrinsic::x86_3dnow_pfcmpgt; 2296 break; 2297 case X86::BI__builtin_ia32_pfmax: 2298 name = "pfmax"; 2299 ID = Intrinsic::x86_3dnow_pfmax; 2300 break; 2301 case X86::BI__builtin_ia32_pfmin: 2302 name = "pfmin"; 2303 ID = Intrinsic::x86_3dnow_pfmin; 2304 break; 2305 case X86::BI__builtin_ia32_pfmul: 2306 name = "pfmul"; 2307 ID = Intrinsic::x86_3dnow_pfmul; 2308 break; 2309 case X86::BI__builtin_ia32_pfrcp: 2310 name = "pfrcp"; 2311 ID = Intrinsic::x86_3dnow_pfrcp; 2312 break; 2313 case X86::BI__builtin_ia32_pfrcpit1: 2314 name = "pfrcpit1"; 2315 ID = Intrinsic::x86_3dnow_pfrcpit1; 2316 break; 2317 case X86::BI__builtin_ia32_pfrcpit2: 2318 name = "pfrcpit2"; 2319 ID = Intrinsic::x86_3dnow_pfrcpit2; 2320 break; 2321 case X86::BI__builtin_ia32_pfrsqrt: 2322 name = "pfrsqrt"; 2323 ID = Intrinsic::x86_3dnow_pfrsqrt; 2324 break; 2325 case X86::BI__builtin_ia32_pfrsqit1: 2326 case X86::BI__builtin_ia32_pfrsqrtit1: 2327 name = "pfrsqit1"; 2328 ID = Intrinsic::x86_3dnow_pfrsqit1; 2329 break; 2330 case X86::BI__builtin_ia32_pfsub: 2331 name = "pfsub"; 2332 ID = Intrinsic::x86_3dnow_pfsub; 2333 break; 2334 case X86::BI__builtin_ia32_pfsubr: 2335 name = "pfsubr"; 2336 ID = Intrinsic::x86_3dnow_pfsubr; 2337 break; 2338 case X86::BI__builtin_ia32_pi2fd: 2339 name = "pi2fd"; 2340 ID = Intrinsic::x86_3dnow_pi2fd; 2341 break; 2342 case X86::BI__builtin_ia32_pmulhrw: 2343 name = "pmulhrw"; 2344 ID = Intrinsic::x86_3dnow_pmulhrw; 2345 break; 2346 case X86::BI__builtin_ia32_pf2iw: 2347 name = "pf2iw"; 2348 ID = Intrinsic::x86_3dnowa_pf2iw; 2349 break; 2350 case X86::BI__builtin_ia32_pfnacc: 2351 name = "pfnacc"; 2352 ID = 
Intrinsic::x86_3dnowa_pfnacc; 2353 break; 2354 case X86::BI__builtin_ia32_pfpnacc: 2355 name = "pfpnacc"; 2356 ID = Intrinsic::x86_3dnowa_pfpnacc; 2357 break; 2358 case X86::BI__builtin_ia32_pi2fw: 2359 name = "pi2fw"; 2360 ID = Intrinsic::x86_3dnowa_pi2fw; 2361 break; 2362 case X86::BI__builtin_ia32_pswapdsf: 2363 case X86::BI__builtin_ia32_pswapdsi: 2364 name = "pswapd"; 2365 ID = Intrinsic::x86_3dnowa_pswapd; 2366 break; 2367 } 2368 llvm::Function *F = CGM.getIntrinsic(ID); 2369 return Builder.CreateCall(F, Ops, name); 2370 } 2371 } 2372} 2373 2374Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, 2375 const CallExpr *E) { 2376 SmallVector<Value*, 4> Ops; 2377 2378 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) 2379 Ops.push_back(EmitScalarExpr(E->getArg(i))); 2380 2381 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2382 2383 switch (BuiltinID) { 2384 default: return 0; 2385 2386 // vec_ld, vec_lvsl, vec_lvsr 2387 case PPC::BI__builtin_altivec_lvx: 2388 case PPC::BI__builtin_altivec_lvxl: 2389 case PPC::BI__builtin_altivec_lvebx: 2390 case PPC::BI__builtin_altivec_lvehx: 2391 case PPC::BI__builtin_altivec_lvewx: 2392 case PPC::BI__builtin_altivec_lvsl: 2393 case PPC::BI__builtin_altivec_lvsr: 2394 { 2395 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); 2396 2397 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp"); 2398 Ops.pop_back(); 2399 2400 switch (BuiltinID) { 2401 default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!"); 2402 case PPC::BI__builtin_altivec_lvx: 2403 ID = Intrinsic::ppc_altivec_lvx; 2404 break; 2405 case PPC::BI__builtin_altivec_lvxl: 2406 ID = Intrinsic::ppc_altivec_lvxl; 2407 break; 2408 case PPC::BI__builtin_altivec_lvebx: 2409 ID = Intrinsic::ppc_altivec_lvebx; 2410 break; 2411 case PPC::BI__builtin_altivec_lvehx: 2412 ID = Intrinsic::ppc_altivec_lvehx; 2413 break; 2414 case PPC::BI__builtin_altivec_lvewx: 2415 ID = Intrinsic::ppc_altivec_lvewx; 2416 break; 2417 case PPC::BI__builtin_altivec_lvsl: 2418 ID = Intrinsic::ppc_altivec_lvsl; 2419 break; 2420 case PPC::BI__builtin_altivec_lvsr: 2421 ID = Intrinsic::ppc_altivec_lvsr; 2422 break; 2423 } 2424 llvm::Function *F = CGM.getIntrinsic(ID); 2425 return Builder.CreateCall(F, Ops, ""); 2426 } 2427 2428 // vec_st 2429 case PPC::BI__builtin_altivec_stvx: 2430 case PPC::BI__builtin_altivec_stvxl: 2431 case PPC::BI__builtin_altivec_stvebx: 2432 case PPC::BI__builtin_altivec_stvehx: 2433 case PPC::BI__builtin_altivec_stvewx: 2434 { 2435 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); 2436 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp"); 2437 Ops.pop_back(); 2438 2439 switch (BuiltinID) { 2440 default: assert(0 && "Unsupported st intrinsic!"); 2441 case PPC::BI__builtin_altivec_stvx: 2442 ID = Intrinsic::ppc_altivec_stvx; 2443 break; 2444 case PPC::BI__builtin_altivec_stvxl: 2445 ID = Intrinsic::ppc_altivec_stvxl; 2446 break; 2447 case PPC::BI__builtin_altivec_stvebx: 2448 ID = Intrinsic::ppc_altivec_stvebx; 2449 break; 2450 case PPC::BI__builtin_altivec_stvehx: 2451 ID = Intrinsic::ppc_altivec_stvehx; 2452 break; 2453 case PPC::BI__builtin_altivec_stvewx: 2454 ID = Intrinsic::ppc_altivec_stvewx; 2455 break; 2456 } 2457 llvm::Function *F = CGM.getIntrinsic(ID); 2458 return Builder.CreateCall(F, Ops, ""); 2459 } 2460 } 2461 return 0; 2462} 2463
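// Note on the AltiVec cases above: for the vec_ld/vec_lvsl/vec_st-style
// builtins, the (offset, base) argument pair is folded into a single i8*
// operand with an explicit GEP before the intrinsic call. Illustrative IR
// sketch for lvx (assuming a vector unsigned int result; value names are
// made up):
//   %addr = getelementptr i8* %base, i32 %offset
//   %lvx  = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %addr)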