CGBuiltin.cpp revision 89f19e43730a2895cd81159d375c71c91872b8c2
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

static void EmitMemoryBarrier(CodeGenFunction &CGF,
                              bool LoadLoad, bool LoadStore,
                              bool StoreLoad, bool StoreStore,
                              bool Device) {
  Value *True = CGF.Builder.getTrue();
  Value *False = CGF.Builder.getFalse();
  Value *C[5] = { LoadLoad ? True : False,
                  LoadStore ? True : False,
                  StoreLoad ? True : False,
                  StoreStore ? True : False,
                  Device ? True : False };
  CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
                         C, C + 5);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, const llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, const llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
                                  Value **ArgBegin, Value **ArgEnd) {
  // FIXME: We need a target hook for whether this applies to device memory or
  // not.
  bool Device = true;

  // Create barriers both before and after the call.
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  return Result;
}
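// Illustrative sketch (not from the original source): given the pre/post
// barriers in EmitCallWithBarrier, a call such as
//
//   int old = __sync_fetch_and_add(&x, 1);
//
// comes out roughly as the following IR on this pre-"atomicrmw" codebase,
// assuming x is a naturally aligned i32 in address space 0:
//
//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 1)
//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)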
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
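// Illustrative sketch (not from the original source): the llvm.atomic.*
// intrinsics always return the value that was in memory *before* the
// operation, so EmitBinaryAtomicPost recomputes the new value locally with
// CreateBinOp. For example, n = __sync_add_and_fetch(&x, 5) is in effect:
//
//   old = atomic-load-add(&x, 5);  // intrinsic returns the pre-add value
//   n   = old + 5;                 // CreateBinOp(Op, Result, Args[1])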
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: assert(0 && "Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.Builder.CreateCall(Fn, V, "abs");
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
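  // Illustrative worked example (not from the original source): for a
  // 32-bit argument x = 0x18 (binary 11000), cttz yields 3 and ctlz yields
  // 27. The ffs case below builds on cttz: ffs(0x18) = cttz + 1 = 4, with
  // the select forcing ffs(0) to 0, since cttz is undefined at zero here.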
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    // FIXME: pass expect through to LLVM
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    if (E->getArg(1)->HasSideEffects(getContext()))
      (void)EmitScalarExpr(E->getArg(1));
    return RValue::get(ArgValue);
  }
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
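  // Note (editorial): __builtin_object_size's second argument is 0..3, but
  // the llvm.objectsize intrinsic used below only distinguishes maximum
  // (types 0/1) from minimum (types 2/3), so the case collapses the four
  // C-level types into one i1 via (val & 0x2) >> 1, e.g.
  //
  //   __builtin_object_size(p, 2)  ->  llvm.objectsize.iN(p, i1 true)
  //   __builtin_object_size(p, 0)  ->  llvm.objectsize.iN(p, i1 false)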
  case Builtin::BI__builtin_object_size: {
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined)
      EmitBranch(getTrapBB());
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
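  // Note (editorial): the ordered-comparison cases below use the FCmpO*
  // "ordered" predicates, which yield false whenever either operand is NaN;
  // that matches the C99 semantics of isgreater and friends (no FE_INVALID,
  // false on unordered inputs). __builtin_isunordered is the one case that
  // wants the opposite, hence the unordered predicate FCmpUNO.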
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
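  // Note (editorial): the classification cases above rely on IEEE-754 NaN
  // behavior instead of calling libm: an ordered self-compare FCmpOEQ(x, x)
  // is false exactly when x is NaN, and the unordered FCmpUNO(x, x) in
  // isnan is true exactly then. So isfinite(x), for instance, is emitted
  // in effect as (x == x) & (fabs(x) != +inf), then zext'ed to int.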
  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }
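  // Illustrative example (not from the original source) of the _chk folding
  // pattern used above and below: with
  //
  //   char buf[16];
  //   __builtin___memcpy_chk(buf, src, 8, __builtin_object_size(buf, 0));
  //
  // both size arguments fold to constants and 8 <= 16, so a plain memcpy is
  // emitted; when the bound can't be proven, the case breaks out of the
  // switch and the call falls through to the __memcpy_chk library function,
  // keeping the runtime check.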
  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);

    return RValue::get(Address);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    const llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64,
                                0, 0);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
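  // Note (editorial): llvm.eh.sjlj.setjmp expects a five-word buffer with
  // the frame pointer in slot 0 and the stack pointer in slot 2 (slot 1 is
  // filled in with the resume address by the backend), which is why the
  // setjmp case above stores through the void** buffer at indices 0 and 2
  // before bitcasting it to i8*.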
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
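  // Note (editorial): both compare-and-swap flavors below lower to the same
  // llvm.atomic.cmp.swap intrinsic, which returns the value that was in
  // memory before the operation. The "val" form returns that old value
  // directly; the "bool" form compares it against the expected operand, so
  // e.g. __sync_bool_compare_and_swap(&x, 5, 9) returns true iff the old
  // value was 5.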
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume, like gcc appears to, that this only applies to cached memory.
    EmitMemoryBarrier(*this, true, true, true, true, false);
    return RValue::get(0);
  }

  case Builtin::BI__builtin_llvm_memory_barrier: {
    Value *C[5] = {
      EmitScalarExpr(E->getArg(0)),
      EmitScalarExpr(E->getArg(1)),
      EmitScalarExpr(E->getArg(2)),
      EmitScalarExpr(E->getArg(3)),
      EmitScalarExpr(E->getArg(4))
    };
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }
  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // TODO: there is currently no set of optimizer flags
    // sufficient for us to rewrite sqrt to @llvm.sqrt.
    // -fmath-errno=0 is not good enough; we need finiteness.
    // We could probably precondition the call with an ult
    // against 0, but is that worth the complexity?
    break;
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin), turn it
  // into a call to that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(E->getCallee()->getType(),
                    CGM.getBuiltinLibFunction(FD, BuiltinID),
                    ReturnValueSlot(), E->arg_begin(), E->arg_end(), FD);
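  // Note (editorial): reaching this point means the switch above either did
  // not match BuiltinID or deliberately broke out of a case (the _chk
  // variants with unprovable bounds, sqrt, a non-const pow, ...). For the
  // libm-alias builtins this emits the plain library call, e.g.
  //
  //   double d = __builtin_sin(x);   // emitted as a call to sin(x)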
  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,
                                                           getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateMemTemp(E->getType()));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
                                           bool q) {
  switch (type) {
  default: break;
  case 0:
  case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
  case 6:
  case 7:
  case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
  case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
  case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
  case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
  }
  return 0;
}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  SmallVector<Constant*, 16> Indices(nElts, C);
  Value* SV = llvm::ConstantVector::get(Indices);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
}
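// Note (editorial, inferred from the decoding in EmitARMBuiltinExpr below):
// in GetNeonType's "type" encoding, the low three bits select the element
// type (0 = i8, 1 = i16, 2 = i32, 3 = i64, 4 = float, 5 = poly8, 6 = poly16,
// 7 = half, with the polynomial and half codes mapped onto the integer
// vector types here), bit 0x08 marks an unsigned operation, and bit 0x10
// selects a quad (128-bit) register, doubling the element count via the
// "<< (int)q" shifts above.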
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
                                            bool neg) {
  ConstantInt *CI = cast<ConstantInt>(V);
  int SV = CI->getSExtValue();

  const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
  return llvm::ConstantVector::get(CV);
}

/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
  unsigned Align = 1;
  // Check if the type is a pointer.  The implicit cast operand might not be.
  while (Addr->getType()->isPointerType()) {
    QualType PtTy = Addr->getType()->getPointeeType();
    unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
    if (NewA > Align)
      Align = NewA;

    // If the address is an implicit cast, repeat with the cast operand.
    if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
      Addr = CastAddr->getSubExpr();
      continue;
    }
    break;
  }
  return llvm::ConstantInt::get(CGF.Int32Ty, Align);
}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    const FunctionDecl *FD = E->getDirectCallee();
    // Oddly people write this call without args on occasion and gcc accepts
    // it - it's also marked as varargs in the description file.
    llvm::SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    llvm::StringRef Name = FD->getName();
    return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                              Ops.begin(), Ops.end());
  }

  if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    return Builder.CreateOr(Val, Val1);
  }
  if (BuiltinID == ARM::BI__builtin_arm_strexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }

  llvm::SmallVector<Value*, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    const llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = llvm::Type::getFloatTy(getLLVMContext());
    else
      Ty = llvm::Type::getDoubleTy(getLLVMContext());

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  unsigned type = Result.getZExtValue();
  bool usgn = type & 0x08;
  bool quad = type & 0x10;
  bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
  (void)poly;  // Only used in assert()s.
  bool rightShift = false;

  const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
  const llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_vabd_v:
  case ARM::BI__builtin_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
  case ARM::BI__builtin_vabs_v:
  case ARM::BI__builtin_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
                        Ops, "vabs");
  case ARM::BI__builtin_vaddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
                        Ops, "vaddhn");
  case ARM::BI__builtin_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_vcls_v:
  case ARM::BI__builtin_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_vclz_v:
  case ARM::BI__builtin_vclzq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_vcnt_v:
  case ARM::BI__builtin_vcntq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcnt");
  }
  case ARM::BI__builtin_vcvt_f16_v: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_vcvt_f32_f16: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_vcvt_f32_v:
  case ARM::BI__builtin_vcvtq_f32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(getLLVMContext(), 4, quad);
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_vcvt_s32_v:
  case ARM::BI__builtin_vcvt_u32_v:
  case ARM::BI__builtin_vcvtq_s32_v:
  case ARM::BI__builtin_vcvtq_u32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0],
                                   GetNeonType(getLLVMContext(), 4, quad));
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_vcvt_n_f32_v:
  case ARM::BI__builtin_vcvtq_n_f32_v: {
    const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_vcvt_n_s32_v:
  case ARM::BI__builtin_vcvt_n_u32_v:
  case ARM::BI__builtin_vcvtq_n_s32_v:
  case ARM::BI__builtin_vcvtq_n_u32_v: {
    const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_vext_v:
  case ARM::BI__builtin_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_vget_lane_i8:
  case ARM::BI__builtin_vget_lane_i16:
  case ARM::BI__builtin_vget_lane_i32:
  case ARM::BI__builtin_vget_lane_i64:
  case ARM::BI__builtin_vget_lane_f32:
  case ARM::BI__builtin_vgetq_lane_i8:
  case ARM::BI__builtin_vgetq_lane_i16:
  case ARM::BI__builtin_vgetq_lane_i32:
  case ARM::BI__builtin_vgetq_lane_i64:
  case ARM::BI__builtin_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_vhadd_v:
  case ARM::BI__builtin_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
  case ARM::BI__builtin_vhsub_v:
  case ARM::BI__builtin_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
  case ARM::BI__builtin_vld1_v:
  case ARM::BI__builtin_vld1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
                        Ops, "vld1");
  case ARM::BI__builtin_vld1_lane_v:
  case ARM::BI__builtin_vld1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  case ARM::BI__builtin_vld1_dup_v:
  case ARM::BI__builtin_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_vld2_v:
  case ARM::BI__builtin_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_vld3_v:
  case ARM::BI__builtin_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_vld4_v:
  case ARM::BI__builtin_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_vld2_lane_v:
  case ARM::BI__builtin_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
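  // Note (editorial): in the vldN pattern used above and below, the
  // arm_neon_vldN intrinsics return a struct of N vectors, while Ops[0] is
  // a pointer to the builtin's result temporary; each case therefore calls
  // the intrinsic, bitcasts Ops[0] to a pointer to the returned struct
  // type, and stores the whole aggregate through it.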
Ty); 1459 return Builder.CreateStore(Ops[1], Ops[0]); 1460 } 1461 case ARM::BI__builtin_vld4_lane_v: 1462 case ARM::BI__builtin_vld4q_lane_v: { 1463 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1); 1464 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1465 Ops[3] = Builder.CreateBitCast(Ops[3], Ty); 1466 Ops[4] = Builder.CreateBitCast(Ops[4], Ty); 1467 Ops[5] = Builder.CreateBitCast(Ops[5], Ty); 1468 Ops.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1469 Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld4_lane"); 1470 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1471 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1472 return Builder.CreateStore(Ops[1], Ops[0]); 1473 } 1474 case ARM::BI__builtin_vld2_dup_v: 1475 case ARM::BI__builtin_vld3_dup_v: 1476 case ARM::BI__builtin_vld4_dup_v: { 1477 // Handle 64-bit elements as a special-case. There is no "dup" needed. 1478 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) { 1479 switch (BuiltinID) { 1480 case ARM::BI__builtin_vld2_dup_v: 1481 Int = Intrinsic::arm_neon_vld2; 1482 break; 1483 case ARM::BI__builtin_vld3_dup_v: 1484 Int = Intrinsic::arm_neon_vld3; 1485 break; 1486 case ARM::BI__builtin_vld4_dup_v: 1487 Int = Intrinsic::arm_neon_vld4; 1488 break; 1489 default: assert(0 && "unknown vld_dup intrinsic?"); 1490 } 1491 Function *F = CGM.getIntrinsic(Int, &Ty, 1); 1492 Value *Align = GetPointeeAlignment(*this, E->getArg(1)); 1493 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup"); 1494 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1495 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1496 return Builder.CreateStore(Ops[1], Ops[0]); 1497 } 1498 switch (BuiltinID) { 1499 case ARM::BI__builtin_vld2_dup_v: 1500 Int = Intrinsic::arm_neon_vld2lane; 1501 break; 1502 case ARM::BI__builtin_vld3_dup_v: 1503 Int = Intrinsic::arm_neon_vld3lane; 1504 break; 1505 case ARM::BI__builtin_vld4_dup_v: 1506 Int = Intrinsic::arm_neon_vld4lane; 1507 break; 1508 default: assert(0 && "unknown vld_dup intrinsic?"); 1509 } 1510 Function *F = CGM.getIntrinsic(Int, &Ty, 1); 1511 const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType()); 1512 1513 SmallVector<Value*, 6> Args; 1514 Args.push_back(Ops[1]); 1515 Args.append(STy->getNumElements(), UndefValue::get(Ty)); 1516 1517 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); 1518 Args.push_back(CI); 1519 Args.push_back(GetPointeeAlignment(*this, E->getArg(1))); 1520 1521 Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup"); 1522 // Splat lane 0 to all elements in each vector of the result. 1523 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1524 Value *Val = Builder.CreateExtractValue(Ops[1], i); 1525 Value *Elt = Builder.CreateBitCast(Val, Ty); 1526 Elt = EmitNeonSplat(Elt, CI); 1527 Elt = Builder.CreateBitCast(Elt, Val->getType()); 1528 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i); 1529 } 1530 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1531 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1532 return Builder.CreateStore(Ops[1], Ops[0]); 1533 } 1534 case ARM::BI__builtin_vmax_v: 1535 case ARM::BI__builtin_vmaxq_v: 1536 Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs; 1537 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax"); 1538 case ARM::BI__builtin_vmin_v: 1539 case ARM::BI__builtin_vminq_v: 1540 Int = usgn ?
Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins; 1541 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin"); 1542 case ARM::BI__builtin_vmovl_v: { 1543 const llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy); 1544 Ops[0] = Builder.CreateBitCast(Ops[0], DTy); 1545 if (usgn) 1546 return Builder.CreateZExt(Ops[0], Ty, "vmovl"); 1547 return Builder.CreateSExt(Ops[0], Ty, "vmovl"); 1548 } 1549 case ARM::BI__builtin_vmovn_v: { 1550 const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy); 1551 Ops[0] = Builder.CreateBitCast(Ops[0], QTy); 1552 return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); 1553 } 1554 case ARM::BI__builtin_vmul_v: 1555 case ARM::BI__builtin_vmulq_v: 1556 assert(poly && "vmul builtin only supported for polynomial types"); 1557 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1), 1558 Ops, "vmul"); 1559 case ARM::BI__builtin_vmull_v: 1560 Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; 1561 Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int; 1562 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmull"); 1563 case ARM::BI__builtin_vpadal_v: 1564 case ARM::BI__builtin_vpadalq_v: { 1565 Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals; 1566 // The source operand type has twice as many elements of half the size. 1567 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); 1568 const llvm::Type *EltTy = 1569 llvm::IntegerType::get(getLLVMContext(), EltBits / 2); 1570 const llvm::Type *NarrowTy = 1571 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); 1572 const llvm::Type *Tys[2] = { Ty, NarrowTy }; 1573 return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal"); 1574 } 1575 case ARM::BI__builtin_vpadd_v: 1576 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1), 1577 Ops, "vpadd"); 1578 case ARM::BI__builtin_vpaddl_v: 1579 case ARM::BI__builtin_vpaddlq_v: { 1580 Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls; 1581 // The source operand type has twice as many elements of half the size. 1582 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); 1583 const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); 1584 const llvm::Type *NarrowTy = 1585 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); 1586 const llvm::Type *Tys[2] = { Ty, NarrowTy }; 1587 return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl"); 1588 } 1589 case ARM::BI__builtin_vpmax_v: 1590 Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs; 1591 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax"); 1592 case ARM::BI__builtin_vpmin_v: 1593 Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins; 1594 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin"); 1595 case ARM::BI__builtin_vqabs_v: 1596 case ARM::BI__builtin_vqabsq_v: 1597 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1), 1598 Ops, "vqabs"); 1599 case ARM::BI__builtin_vqadd_v: 1600 case ARM::BI__builtin_vqaddq_v: 1601 Int = usgn ?
Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds; 1602 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd"); 1603 case ARM::BI__builtin_vqdmlal_v: 1604 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1), 1605 Ops, "vqdmlal"); 1606 case ARM::BI__builtin_vqdmlsl_v: 1607 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1), 1608 Ops, "vqdmlsl"); 1609 case ARM::BI__builtin_vqdmulh_v: 1610 case ARM::BI__builtin_vqdmulhq_v: 1611 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1), 1612 Ops, "vqdmulh"); 1613 case ARM::BI__builtin_vqdmull_v: 1614 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1), 1615 Ops, "vqdmull"); 1616 case ARM::BI__builtin_vqmovn_v: 1617 Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns; 1618 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn"); 1619 case ARM::BI__builtin_vqmovun_v: 1620 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1), 1621 Ops, "vqmovun"); 1622 case ARM::BI__builtin_vqneg_v: 1623 case ARM::BI__builtin_vqnegq_v: 1624 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1), 1625 Ops, "vqneg"); 1626 case ARM::BI__builtin_vqrdmulh_v: 1627 case ARM::BI__builtin_vqrdmulhq_v: 1628 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1), 1629 Ops, "vqrdmulh"); 1630 case ARM::BI__builtin_vqrshl_v: 1631 case ARM::BI__builtin_vqrshlq_v: 1632 Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts; 1633 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl"); 1634 case ARM::BI__builtin_vqrshrn_n_v: 1635 Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; 1636 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", 1637 1, true); 1638 case ARM::BI__builtin_vqrshrun_n_v: 1639 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1), 1640 Ops, "vqrshrun_n", 1, true); 1641 case ARM::BI__builtin_vqshl_v: 1642 case ARM::BI__builtin_vqshlq_v: 1643 Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; 1644 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl"); 1645 case ARM::BI__builtin_vqshl_n_v: 1646 case ARM::BI__builtin_vqshlq_n_v: 1647 Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; 1648 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", 1649 1, false); 1650 case ARM::BI__builtin_vqshlu_n_v: 1651 case ARM::BI__builtin_vqshluq_n_v: 1652 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1), 1653 Ops, "vqshlu", 1, false); 1654 case ARM::BI__builtin_vqshrn_n_v: 1655 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; 1656 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", 1657 1, true); 1658 case ARM::BI__builtin_vqshrun_n_v: 1659 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1), 1660 Ops, "vqshrun_n", 1, true); 1661 case ARM::BI__builtin_vqsub_v: 1662 case ARM::BI__builtin_vqsubq_v: 1663 Int = usgn ?
Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs; 1664 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub"); 1665 case ARM::BI__builtin_vraddhn_v: 1666 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1), 1667 Ops, "vraddhn"); 1668 case ARM::BI__builtin_vrecpe_v: 1669 case ARM::BI__builtin_vrecpeq_v: 1670 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1), 1671 Ops, "vrecpe"); 1672 case ARM::BI__builtin_vrecps_v: 1673 case ARM::BI__builtin_vrecpsq_v: 1674 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1), 1675 Ops, "vrecps"); 1676 case ARM::BI__builtin_vrhadd_v: 1677 case ARM::BI__builtin_vrhaddq_v: 1678 Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds; 1679 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd"); 1680 case ARM::BI__builtin_vrshl_v: 1681 case ARM::BI__builtin_vrshlq_v: 1682 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1683 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl"); 1684 case ARM::BI__builtin_vrshrn_n_v: 1685 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1), 1686 Ops, "vrshrn_n", 1, true); 1687 case ARM::BI__builtin_vrshr_n_v: 1688 case ARM::BI__builtin_vrshrq_n_v: 1689 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1690 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true); 1691 case ARM::BI__builtin_vrsqrte_v: 1692 case ARM::BI__builtin_vrsqrteq_v: 1693 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1), 1694 Ops, "vrsqrte"); 1695 case ARM::BI__builtin_vrsqrts_v: 1696 case ARM::BI__builtin_vrsqrtsq_v: 1697 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1), 1698 Ops, "vrsqrts"); 1699 case ARM::BI__builtin_vrsra_n_v: 1700 case ARM::BI__builtin_vrsraq_n_v: 1701 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1702 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1703 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); 1704 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; 1705 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]); 1706 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); 1707 case ARM::BI__builtin_vrsubhn_v: 1708 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1), 1709 Ops, "vrsubhn"); 1710 case ARM::BI__builtin_vset_lane_i8: 1711 case ARM::BI__builtin_vset_lane_i16: 1712 case ARM::BI__builtin_vset_lane_i32: 1713 case ARM::BI__builtin_vset_lane_i64: 1714 case ARM::BI__builtin_vset_lane_f32: 1715 case ARM::BI__builtin_vsetq_lane_i8: 1716 case ARM::BI__builtin_vsetq_lane_i16: 1717 case ARM::BI__builtin_vsetq_lane_i32: 1718 case ARM::BI__builtin_vsetq_lane_i64: 1719 case ARM::BI__builtin_vsetq_lane_f32: 1720 Ops.push_back(EmitScalarExpr(E->getArg(2))); 1721 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); 1722 case ARM::BI__builtin_vshl_v: 1723 case ARM::BI__builtin_vshlq_v: 1724 Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts; 1725 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl"); 1726 case ARM::BI__builtin_vshll_n_v: 1727 Int = usgn ? 
Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls; 1728 return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1); 1729 case ARM::BI__builtin_vshl_n_v: 1730 case ARM::BI__builtin_vshlq_n_v: 1731 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); 1732 return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1], "vshl_n"); 1733 case ARM::BI__builtin_vshrn_n_v: 1734 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1), 1735 Ops, "vshrn_n", 1, true); 1736 case ARM::BI__builtin_vshr_n_v: 1737 case ARM::BI__builtin_vshrq_n_v: 1738 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1739 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); 1740 if (usgn) 1741 return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n"); 1742 else 1743 return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n"); 1744 case ARM::BI__builtin_vsri_n_v: 1745 case ARM::BI__builtin_vsriq_n_v: 1746 rightShift = true; // fall through to the vsli cases below 1747 case ARM::BI__builtin_vsli_n_v: 1748 case ARM::BI__builtin_vsliq_n_v: 1749 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); 1750 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1), 1751 Ops, "vsli_n"); 1752 case ARM::BI__builtin_vsra_n_v: 1753 case ARM::BI__builtin_vsraq_n_v: 1754 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1755 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1756 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false); 1757 if (usgn) 1758 Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n"); 1759 else 1760 Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n"); 1761 return Builder.CreateAdd(Ops[0], Ops[1]); 1762 case ARM::BI__builtin_vst1_v: 1763 case ARM::BI__builtin_vst1q_v: 1764 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1765 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1), 1766 Ops, ""); 1767 case ARM::BI__builtin_vst1_lane_v: 1768 case ARM::BI__builtin_vst1q_lane_v: 1769 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1770 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); 1771 Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); 1772 return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty)); 1773 case ARM::BI__builtin_vst2_v: 1774 case ARM::BI__builtin_vst2q_v: 1775 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1776 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1), 1777 Ops, ""); 1778 case ARM::BI__builtin_vst2_lane_v: 1779 case ARM::BI__builtin_vst2q_lane_v: 1780 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1781 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1), 1782 Ops, ""); 1783 case ARM::BI__builtin_vst3_v: 1784 case ARM::BI__builtin_vst3q_v: 1785 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1786 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1), 1787 Ops, ""); 1788 case ARM::BI__builtin_vst3_lane_v: 1789 case ARM::BI__builtin_vst3q_lane_v: 1790 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1791 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1), 1792 Ops, ""); 1793 case ARM::BI__builtin_vst4_v: 1794 case ARM::BI__builtin_vst4q_v: 1795 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1796 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1), 1797 Ops, ""); 1798 case ARM::BI__builtin_vst4_lane_v: 1799 case ARM::BI__builtin_vst4q_lane_v: 1800 Ops.push_back(GetPointeeAlignment(*this, E->getArg(0))); 1801 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1), 1802 Ops, ""); 1803 case ARM::BI__builtin_vsubhn_v: 1804
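// For reference (NEON semantics, sketch): vsubhn subtracts and keeps the
// high half of each lane, so vsubhn_s16(a, b) yields an int8x8_t whose
// lanes are (int8_t)((a[i] - b[i]) >> 8).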
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1), 1805 Ops, "vsubhn"); 1806 case ARM::BI__builtin_vtbl1_v: 1807 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), 1808 Ops, "vtbl1"); 1809 case ARM::BI__builtin_vtbl2_v: 1810 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), 1811 Ops, "vtbl2"); 1812 case ARM::BI__builtin_vtbl3_v: 1813 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), 1814 Ops, "vtbl3"); 1815 case ARM::BI__builtin_vtbl4_v: 1816 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), 1817 Ops, "vtbl4"); 1818 case ARM::BI__builtin_vtbx1_v: 1819 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), 1820 Ops, "vtbx1"); 1821 case ARM::BI__builtin_vtbx2_v: 1822 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), 1823 Ops, "vtbx2"); 1824 case ARM::BI__builtin_vtbx3_v: 1825 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), 1826 Ops, "vtbx3"); 1827 case ARM::BI__builtin_vtbx4_v: 1828 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), 1829 Ops, "vtbx4"); 1830 case ARM::BI__builtin_vtst_v: 1831 case ARM::BI__builtin_vtstq_v: { 1832 Ops[0] = Builder.CreateBitCast(Ops[0], Ty); 1833 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1834 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); 1835 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], 1836 ConstantAggregateZero::get(Ty)); 1837 return Builder.CreateSExt(Ops[0], Ty, "vtst"); 1838 } 1839 case ARM::BI__builtin_vtrn_v: 1840 case ARM::BI__builtin_vtrnq_v: { 1841 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 1842 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1843 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1844 Value *SV = 0; 1845 1846 for (unsigned vi = 0; vi != 2; ++vi) { 1847 SmallVector<Constant*, 16> Indices; 1848 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 1849 Indices.push_back(ConstantInt::get(Int32Ty, i+vi)); 1850 Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi)); 1851 } 1852 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1853 SV = llvm::ConstantVector::get(Indices); 1854 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); 1855 SV = Builder.CreateStore(SV, Addr); 1856 } 1857 return SV; 1858 } 1859 case ARM::BI__builtin_vuzp_v: 1860 case ARM::BI__builtin_vuzpq_v: { 1861 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 1862 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1863 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1864 Value *SV = 0; 1865 1866 for (unsigned vi = 0; vi != 2; ++vi) { 1867 SmallVector<Constant*, 16> Indices; 1868 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) 1869 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); 1870 1871 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1872 SV = llvm::ConstantVector::get(Indices); 1873 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); 1874 SV = Builder.CreateStore(SV, Addr); 1875 } 1876 return SV; 1877 } 1878 case ARM::BI__builtin_vzip_v: 1879 case ARM::BI__builtin_vzipq_v: { 1880 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 1881 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 1882 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 1883 Value *SV = 0; 1884 1885 for (unsigned vi = 0; vi != 2; ++vi) { 1886 SmallVector<Constant*, 16> Indices; 1887 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 1888 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); 1889 
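// Together, the two pushes per iteration build the interleaving mask;
// e.g. with e == 4 the result is <0,4,1,5> for vi == 0 (low halves) and
// <2,6,3,7> for vi == 1 (high halves).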
Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); 1890 } 1891 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 1892 SV = llvm::ConstantVector::get(Indices); 1893 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); 1894 SV = Builder.CreateStore(SV, Addr); 1895 } 1896 return SV; 1897 } 1898 } 1899} 1900 1901llvm::Value *CodeGenFunction:: 1902BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) { 1903 assert((Ops.size() & (Ops.size() - 1)) == 0 && 1904 "Not a power-of-two sized vector!"); 1905 bool AllConstants = true; 1906 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) 1907 AllConstants &= isa<Constant>(Ops[i]); 1908 1909 // If this is a constant vector, create a ConstantVector. 1910 if (AllConstants) { 1911 std::vector<llvm::Constant*> CstOps; 1912 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1913 CstOps.push_back(cast<Constant>(Ops[i])); 1914 return llvm::ConstantVector::get(CstOps); 1915 } 1916 1917 // Otherwise, insertelement the values to build the vector. 1918 Value *Result = 1919 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); 1920 1921 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1922 Result = Builder.CreateInsertElement(Result, Ops[i], 1923 llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i)); 1924 1925 return Result; 1926} 1927 1928Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, 1929 const CallExpr *E) { 1930 llvm::SmallVector<Value*, 4> Ops; 1931 1932 // Find out if any arguments are required to be integer constant expressions. 1933 unsigned ICEArguments = 0; 1934 ASTContext::GetBuiltinTypeError Error; 1935 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); 1936 assert(Error == ASTContext::GE_None && "Should not codegen an error"); 1937 1938 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { 1939 // If this is a normal argument, just emit it as a scalar. 1940 if ((ICEArguments & (1 << i)) == 0) { 1941 Ops.push_back(EmitScalarExpr(E->getArg(i))); 1942 continue; 1943 } 1944 1945 // If this is required to be a constant, constant fold it so that we know 1946 // that the generated intrinsic gets a ConstantInt. 
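// For example, an argument marked with 'I' in the builtin's prototype in
// Builtins.def (such as an immediate shift count) takes this path; the
// front end has already verified it is an integer constant expression.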
1947 llvm::APSInt Result; 1948 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); 1949 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; 1950 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); 1951 } 1952 1953 switch (BuiltinID) { 1954 default: return 0; 1955 case X86::BI__builtin_ia32_pslldi128: 1956 case X86::BI__builtin_ia32_psllqi128: 1957 case X86::BI__builtin_ia32_psllwi128: 1958 case X86::BI__builtin_ia32_psradi128: 1959 case X86::BI__builtin_ia32_psrawi128: 1960 case X86::BI__builtin_ia32_psrldi128: 1961 case X86::BI__builtin_ia32_psrlqi128: 1962 case X86::BI__builtin_ia32_psrlwi128: { 1963 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext"); 1964 const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); 1965 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 1966 Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty), 1967 Ops[1], Zero, "insert"); 1968 Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast"); 1969 const char *name = 0; 1970 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1971 1972 switch (BuiltinID) { 1973 default: assert(0 && "Unsupported shift intrinsic!"); 1974 case X86::BI__builtin_ia32_pslldi128: 1975 name = "pslldi"; 1976 ID = Intrinsic::x86_sse2_psll_d; 1977 break; 1978 case X86::BI__builtin_ia32_psllqi128: 1979 name = "psllqi"; 1980 ID = Intrinsic::x86_sse2_psll_q; 1981 break; 1982 case X86::BI__builtin_ia32_psllwi128: 1983 name = "psllwi"; 1984 ID = Intrinsic::x86_sse2_psll_w; 1985 break; 1986 case X86::BI__builtin_ia32_psradi128: 1987 name = "psradi"; 1988 ID = Intrinsic::x86_sse2_psra_d; 1989 break; 1990 case X86::BI__builtin_ia32_psrawi128: 1991 name = "psrawi"; 1992 ID = Intrinsic::x86_sse2_psra_w; 1993 break; 1994 case X86::BI__builtin_ia32_psrldi128: 1995 name = "psrldi"; 1996 ID = Intrinsic::x86_sse2_psrl_d; 1997 break; 1998 case X86::BI__builtin_ia32_psrlqi128: 1999 name = "psrlqi"; 2000 ID = Intrinsic::x86_sse2_psrl_q; 2001 break; 2002 case X86::BI__builtin_ia32_psrlwi128: 2003 name = "psrlwi"; 2004 ID = Intrinsic::x86_sse2_psrl_w; 2005 break; 2006 } 2007 llvm::Function *F = CGM.getIntrinsic(ID); 2008 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name); 2009 } 2010 case X86::BI__builtin_ia32_vec_init_v8qi: 2011 case X86::BI__builtin_ia32_vec_init_v4hi: 2012 case X86::BI__builtin_ia32_vec_init_v2si: 2013 return Builder.CreateBitCast(BuildVector(Ops), 2014 llvm::Type::getX86_MMXTy(getLLVMContext())); 2015 case X86::BI__builtin_ia32_vec_ext_v2si: 2016 return Builder.CreateExtractElement(Ops[0], 2017 llvm::ConstantInt::get(Ops[1]->getType(), 0)); 2018 case X86::BI__builtin_ia32_pslldi: 2019 case X86::BI__builtin_ia32_psllqi: 2020 case X86::BI__builtin_ia32_psllwi: 2021 case X86::BI__builtin_ia32_psradi: 2022 case X86::BI__builtin_ia32_psrawi: 2023 case X86::BI__builtin_ia32_psrldi: 2024 case X86::BI__builtin_ia32_psrlqi: 2025 case X86::BI__builtin_ia32_psrlwi: { 2026 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext"); 2027 const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1); 2028 Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast"); 2029 const char *name = 0; 2030 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2031 2032 switch (BuiltinID) { 2033 default: assert(0 && "Unsupported shift intrinsic!"); 2034 case X86::BI__builtin_ia32_pslldi: 2035 name = "pslldi"; 2036 ID = Intrinsic::x86_mmx_psll_d; 2037 break; 2038 case X86::BI__builtin_ia32_psllqi: 2039 name = "psllqi"; 2040 ID = Intrinsic::x86_mmx_psll_q; 2041 break; 2042 case 
X86::BI__builtin_ia32_psllwi: 2043 name = "psllwi"; 2044 ID = Intrinsic::x86_mmx_psll_w; 2045 break; 2046 case X86::BI__builtin_ia32_psradi: 2047 name = "psradi"; 2048 ID = Intrinsic::x86_mmx_psra_d; 2049 break; 2050 case X86::BI__builtin_ia32_psrawi: 2051 name = "psrawi"; 2052 ID = Intrinsic::x86_mmx_psra_w; 2053 break; 2054 case X86::BI__builtin_ia32_psrldi: 2055 name = "psrldi"; 2056 ID = Intrinsic::x86_mmx_psrl_d; 2057 break; 2058 case X86::BI__builtin_ia32_psrlqi: 2059 name = "psrlqi"; 2060 ID = Intrinsic::x86_mmx_psrl_q; 2061 break; 2062 case X86::BI__builtin_ia32_psrlwi: 2063 name = "psrlwi"; 2064 ID = Intrinsic::x86_mmx_psrl_w; 2065 break; 2066 } 2067 llvm::Function *F = CGM.getIntrinsic(ID); 2068 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name); 2069 } 2070 case X86::BI__builtin_ia32_cmpps: { 2071 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps); 2072 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps"); 2073 } 2074 case X86::BI__builtin_ia32_cmpss: { 2075 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss); 2076 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss"); 2077 } 2078 case X86::BI__builtin_ia32_ldmxcsr: { 2079 const llvm::Type *PtrTy = Int8PtrTy; 2080 Value *One = llvm::ConstantInt::get(Int32Ty, 1); 2081 Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); 2082 Builder.CreateStore(Ops[0], Tmp); 2083 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), 2084 Builder.CreateBitCast(Tmp, PtrTy)); 2085 } 2086 case X86::BI__builtin_ia32_stmxcsr: { 2087 const llvm::Type *PtrTy = Int8PtrTy; 2088 Value *One = llvm::ConstantInt::get(Int32Ty, 1); 2089 Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); 2090 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), 2091 Builder.CreateBitCast(Tmp, PtrTy)); 2092 return Builder.CreateLoad(Tmp, "stmxcsr"); 2093 } 2094 case X86::BI__builtin_ia32_cmppd: { 2095 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd); 2096 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd"); 2097 } 2098 case X86::BI__builtin_ia32_cmpsd: { 2099 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd); 2100 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd"); 2101 } 2102 case X86::BI__builtin_ia32_storehps: 2103 case X86::BI__builtin_ia32_storelps: { 2104 llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); 2105 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); 2106 2107 // Cast the value to v2i64. 2108 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); 2109 2110 // Extract element 0 (storelps) or 1 (storehps). 2111 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1; 2112 llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index); 2113 Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract"); 2114 2115 // Cast the pointer to i64* and store. 2116 Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); 2117 return Builder.CreateStore(Ops[1], Ops[0]); 2118 } 2119 case X86::BI__builtin_ia32_palignr: { 2120 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); 2121 2122 // If palignr is shifting the pair of input vectors by 8 bytes or less, 2123 // emit a shuffle instruction.
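// For example, shiftVal == 2 produces the mask <2,3,4,5,6,7,8,9>,
// selecting bytes 2-9 of the 16-byte concatenation (Ops[1], Ops[0]).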
2124 if (shiftVal <= 8) { 2125 llvm::SmallVector<llvm::Constant*, 8> Indices; 2126 for (unsigned i = 0; i != 8; ++i) 2127 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); 2128 2129 Value* SV = llvm::ConstantVector::get(Indices); 2130 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); 2131 } 2132 2133 // If palignr is shifting the pair of input vectors more than 8 but less 2134 // than 16 bytes, emit a logical right shift of the destination. 2135 if (shiftVal < 16) { 2136 // MMX has these as 1 x i64 vectors for some odd optimization reasons. 2137 const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1); 2138 2139 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); 2140 Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8); 2141 2142 // Shift the destination right with the MMX psrl.q intrinsic. 2143 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q); 2144 return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr"); 2145 } 2146 2147 // If palignr is shifting the pair of vectors by 16 bytes or more, emit zero. 2148 return llvm::Constant::getNullValue(ConvertType(E->getType())); 2149 } 2150 case X86::BI__builtin_ia32_palignr128: { 2151 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); 2152 2153 // If palignr is shifting the pair of input vectors by 16 bytes or less, 2154 // emit a shuffle instruction. 2155 if (shiftVal <= 16) { 2156 llvm::SmallVector<llvm::Constant*, 16> Indices; 2157 for (unsigned i = 0; i != 16; ++i) 2158 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); 2159 2160 Value* SV = llvm::ConstantVector::get(Indices); 2161 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); 2162 } 2163 2164 // If palignr is shifting the pair of input vectors more than 16 but less 2165 // than 32 bytes, emit a logical right shift of the destination. 2166 if (shiftVal < 32) { 2167 const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); 2168 2169 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); 2170 Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8); 2171 2172 // Shift the destination right with the SSE2 psrl.dq intrinsic. 2173 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq); 2174 return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr"); 2175 } 2176 2177 // If palignr is shifting the pair of vectors by 32 bytes or more, emit zero. 2178 return llvm::Constant::getNullValue(ConvertType(E->getType())); 2179 } 2180 case X86::BI__builtin_ia32_movntps: 2181 case X86::BI__builtin_ia32_movntpd: 2182 case X86::BI__builtin_ia32_movntdq: 2183 case X86::BI__builtin_ia32_movnti: { 2184 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), 2185 Builder.getInt32(1)); 2186 2187 // Convert the type of the pointer to a pointer to the stored type. 2188 Value *BC = Builder.CreateBitCast(Ops[0], 2189 llvm::PointerType::getUnqual(Ops[1]->getType()), 2190 "cast"); 2191 StoreInst *SI = Builder.CreateStore(Ops[1], BC); 2192 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 2193 SI->setAlignment(16); 2194 return SI; 2195 } 2196 // 3DNow!
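// Each of these 3DNow! builtins maps directly onto an LLVM intrinsic of
// the same name (e.g. __builtin_ia32_pfadd -> llvm.x86.3dnow.pfadd); the
// switch below only selects the intrinsic ID and the call name.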
2197 case X86::BI__builtin_ia32_pavgusb: 2198 case X86::BI__builtin_ia32_pf2id: 2199 case X86::BI__builtin_ia32_pfacc: 2200 case X86::BI__builtin_ia32_pfadd: 2201 case X86::BI__builtin_ia32_pfcmpeq: 2202 case X86::BI__builtin_ia32_pfcmpge: 2203 case X86::BI__builtin_ia32_pfcmpgt: 2204 case X86::BI__builtin_ia32_pfmax: 2205 case X86::BI__builtin_ia32_pfmin: 2206 case X86::BI__builtin_ia32_pfmul: 2207 case X86::BI__builtin_ia32_pfrcp: 2208 case X86::BI__builtin_ia32_pfrcpit1: 2209 case X86::BI__builtin_ia32_pfrcpit2: 2210 case X86::BI__builtin_ia32_pfrsqrt: 2211 case X86::BI__builtin_ia32_pfrsqit1: 2212 case X86::BI__builtin_ia32_pfrsqrtit1: 2213 case X86::BI__builtin_ia32_pfsub: 2214 case X86::BI__builtin_ia32_pfsubr: 2215 case X86::BI__builtin_ia32_pi2fd: 2216 case X86::BI__builtin_ia32_pmulhrw: 2217 case X86::BI__builtin_ia32_pf2iw: 2218 case X86::BI__builtin_ia32_pfnacc: 2219 case X86::BI__builtin_ia32_pfpnacc: 2220 case X86::BI__builtin_ia32_pi2fw: 2221 case X86::BI__builtin_ia32_pswapdsf: 2222 case X86::BI__builtin_ia32_pswapdsi: { 2223 const char *name = 0; 2224 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2225 switch(BuiltinID) { 2226 case X86::BI__builtin_ia32_pavgusb: 2227 name = "pavgusb"; 2228 ID = Intrinsic::x86_3dnow_pavgusb; 2229 break; 2230 case X86::BI__builtin_ia32_pf2id: 2231 name = "pf2id"; 2232 ID = Intrinsic::x86_3dnow_pf2id; 2233 break; 2234 case X86::BI__builtin_ia32_pfacc: 2235 name = "pfacc"; 2236 ID = Intrinsic::x86_3dnow_pfacc; 2237 break; 2238 case X86::BI__builtin_ia32_pfadd: 2239 name = "pfadd"; 2240 ID = Intrinsic::x86_3dnow_pfadd; 2241 break; 2242 case X86::BI__builtin_ia32_pfcmpeq: 2243 name = "pfcmpeq"; 2244 ID = Intrinsic::x86_3dnow_pfcmpeq; 2245 break; 2246 case X86::BI__builtin_ia32_pfcmpge: 2247 name = "pfcmpge"; 2248 ID = Intrinsic::x86_3dnow_pfcmpge; 2249 break; 2250 case X86::BI__builtin_ia32_pfcmpgt: 2251 name = "pfcmpgt"; 2252 ID = Intrinsic::x86_3dnow_pfcmpgt; 2253 break; 2254 case X86::BI__builtin_ia32_pfmax: 2255 name = "pfmax"; 2256 ID = Intrinsic::x86_3dnow_pfmax; 2257 break; 2258 case X86::BI__builtin_ia32_pfmin: 2259 name = "pfmin"; 2260 ID = Intrinsic::x86_3dnow_pfmin; 2261 break; 2262 case X86::BI__builtin_ia32_pfmul: 2263 name = "pfmul"; 2264 ID = Intrinsic::x86_3dnow_pfmul; 2265 break; 2266 case X86::BI__builtin_ia32_pfrcp: 2267 name = "pfrcp"; 2268 ID = Intrinsic::x86_3dnow_pfrcp; 2269 break; 2270 case X86::BI__builtin_ia32_pfrcpit1: 2271 name = "pfrcpit1"; 2272 ID = Intrinsic::x86_3dnow_pfrcpit1; 2273 break; 2274 case X86::BI__builtin_ia32_pfrcpit2: 2275 name = "pfrcpit2"; 2276 ID = Intrinsic::x86_3dnow_pfrcpit2; 2277 break; 2278 case X86::BI__builtin_ia32_pfrsqrt: 2279 name = "pfrsqrt"; 2280 ID = Intrinsic::x86_3dnow_pfrsqrt; 2281 break; 2282 case X86::BI__builtin_ia32_pfrsqit1: 2283 case X86::BI__builtin_ia32_pfrsqrtit1: 2284 name = "pfrsqit1"; 2285 ID = Intrinsic::x86_3dnow_pfrsqit1; 2286 break; 2287 case X86::BI__builtin_ia32_pfsub: 2288 name = "pfsub"; 2289 ID = Intrinsic::x86_3dnow_pfsub; 2290 break; 2291 case X86::BI__builtin_ia32_pfsubr: 2292 name = "pfsubr"; 2293 ID = Intrinsic::x86_3dnow_pfsubr; 2294 break; 2295 case X86::BI__builtin_ia32_pi2fd: 2296 name = "pi2fd"; 2297 ID = Intrinsic::x86_3dnow_pi2fd; 2298 break; 2299 case X86::BI__builtin_ia32_pmulhrw: 2300 name = "pmulhrw"; 2301 ID = Intrinsic::x86_3dnow_pmulhrw; 2302 break; 2303 case X86::BI__builtin_ia32_pf2iw: 2304 name = "pf2iw"; 2305 ID = Intrinsic::x86_3dnowa_pf2iw; 2306 break; 2307 case X86::BI__builtin_ia32_pfnacc: 2308 name = "pfnacc"; 2309 ID = 
Intrinsic::x86_3dnowa_pfnacc; 2310 break; 2311 case X86::BI__builtin_ia32_pfpnacc: 2312 name = "pfpnacc"; 2313 ID = Intrinsic::x86_3dnowa_pfpnacc; 2314 break; 2315 case X86::BI__builtin_ia32_pi2fw: 2316 name = "pi2fw"; 2317 ID = Intrinsic::x86_3dnowa_pi2fw; 2318 break; 2319 case X86::BI__builtin_ia32_pswapdsf: 2320 case X86::BI__builtin_ia32_pswapdsi: 2321 name = "pswapd"; 2322 ID = Intrinsic::x86_3dnowa_pswapd; 2323 break; 2324 } 2325 llvm::Function *F = CGM.getIntrinsic(ID); 2326 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name); 2327 } 2328 } 2329} 2330 2331Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, 2332 const CallExpr *E) { 2333 llvm::SmallVector<Value*, 4> Ops; 2334 2335 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) 2336 Ops.push_back(EmitScalarExpr(E->getArg(i))); 2337 2338 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2339 2340 switch (BuiltinID) { 2341 default: return 0; 2342 2343 // vec_ld, vec_lvsl, vec_lvsr 2344 case PPC::BI__builtin_altivec_lvx: 2345 case PPC::BI__builtin_altivec_lvxl: 2346 case PPC::BI__builtin_altivec_lvebx: 2347 case PPC::BI__builtin_altivec_lvehx: 2348 case PPC::BI__builtin_altivec_lvewx: 2349 case PPC::BI__builtin_altivec_lvsl: 2350 case PPC::BI__builtin_altivec_lvsr: 2351 { 2352 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); 2353 2354 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp"); 2355 Ops.pop_back(); 2356 2357 switch (BuiltinID) { 2358 default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!"); 2359 case PPC::BI__builtin_altivec_lvx: 2360 ID = Intrinsic::ppc_altivec_lvx; 2361 break; 2362 case PPC::BI__builtin_altivec_lvxl: 2363 ID = Intrinsic::ppc_altivec_lvxl; 2364 break; 2365 case PPC::BI__builtin_altivec_lvebx: 2366 ID = Intrinsic::ppc_altivec_lvebx; 2367 break; 2368 case PPC::BI__builtin_altivec_lvehx: 2369 ID = Intrinsic::ppc_altivec_lvehx; 2370 break; 2371 case PPC::BI__builtin_altivec_lvewx: 2372 ID = Intrinsic::ppc_altivec_lvewx; 2373 break; 2374 case PPC::BI__builtin_altivec_lvsl: 2375 ID = Intrinsic::ppc_altivec_lvsl; 2376 break; 2377 case PPC::BI__builtin_altivec_lvsr: 2378 ID = Intrinsic::ppc_altivec_lvsr; 2379 break; 2380 } 2381 llvm::Function *F = CGM.getIntrinsic(ID); 2382 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), ""); 2383 } 2384 2385 // vec_st 2386 case PPC::BI__builtin_altivec_stvx: 2387 case PPC::BI__builtin_altivec_stvxl: 2388 case PPC::BI__builtin_altivec_stvebx: 2389 case PPC::BI__builtin_altivec_stvehx: 2390 case PPC::BI__builtin_altivec_stvewx: 2391 { 2392 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); 2393 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp"); 2394 Ops.pop_back(); 2395 2396 switch (BuiltinID) { 2397 default: assert(0 && "Unsupported st intrinsic!"); 2398 case PPC::BI__builtin_altivec_stvx: 2399 ID = Intrinsic::ppc_altivec_stvx; 2400 break; 2401 case PPC::BI__builtin_altivec_stvxl: 2402 ID = Intrinsic::ppc_altivec_stvxl; 2403 break; 2404 case PPC::BI__builtin_altivec_stvebx: 2405 ID = Intrinsic::ppc_altivec_stvebx; 2406 break; 2407 case PPC::BI__builtin_altivec_stvehx: 2408 ID = Intrinsic::ppc_altivec_stvehx; 2409 break; 2410 case PPC::BI__builtin_altivec_stvewx: 2411 ID = Intrinsic::ppc_altivec_stvewx; 2412 break; 2413 } 2414 llvm::Function *F = CGM.getIntrinsic(ID); 2415 return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), ""); 2416 } 2417 } 2418 return 0; 2419} 2420
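// Example lowering for the AltiVec cases above (sketch): for vec_ld(off,
// ptr) the offset is folded into the address with a GEP on an i8* base and
// the load becomes a call to llvm.ppc.altivec.lvx; vec_st is handled
// symmetrically through the stv* intrinsics.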