CGBuiltin.cpp revision a4cc79994b2f12663143dc31ceaf198e703fb914
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

static void EmitMemoryBarrier(CodeGenFunction &CGF,
                              bool LoadLoad, bool LoadStore,
                              bool StoreLoad, bool StoreStore,
                              bool Device) {
  Value *True = llvm::ConstantInt::getTrue(CGF.getLLVMContext());
  Value *False = llvm::ConstantInt::getFalse(CGF.getLLVMContext());
  Value *C[5] = { LoadLoad ? True : False,
                  LoadStore ? True : False,
                  StoreLoad ? True : False,
                  StoreStore ? True : False,
                  Device ? True : False };
  CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
                         C, C + 5);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, const llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, const llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
                                  Value **ArgBegin, Value **ArgEnd) {
  // FIXME: We need a target hook for whether this applies to device memory or
  // not.
  bool Device = true;

  // Create barriers both before and after the call.
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  return Result;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
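///
/// For example, __sync_fetch_and_add(&x, v) on an i32 lowers to roughly
/// the following (a sketch in the pre-instruction-level-atomics IR of this
/// era; value names are illustrative only):
///
///   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true,
///                                  i1 true)
///   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %x, i32 %v)
///   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true,
///                                  i1 true)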
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: assert(0 && "Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
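  // (For a 'double' argument, for example, this builds the function type
  // 'double (double)' and a reference to the "fabs" library symbol.)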
  std::vector<const llvm::Type*> Args;
  Args.push_back(V->getType());
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), Args, false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.Builder.CreateCall(Fn, V, "abs");
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(VMContext,
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast"); 257 return RValue::get(Result); 258 } 259 case Builtin::BI__builtin_ffs: 260 case Builtin::BI__builtin_ffsl: 261 case Builtin::BI__builtin_ffsll: { 262 // ffs(x) -> x ? cttz(x) + 1 : 0 263 Value *ArgValue = EmitScalarExpr(E->getArg(0)); 264 265 const llvm::Type *ArgType = ArgValue->getType(); 266 Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); 267 268 const llvm::Type *ResultType = ConvertType(E->getType()); 269 Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"), 270 llvm::ConstantInt::get(ArgType, 1), "tmp"); 271 Value *Zero = llvm::Constant::getNullValue(ArgType); 272 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); 273 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); 274 if (Result->getType() != ResultType) 275 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, 276 "cast"); 277 return RValue::get(Result); 278 } 279 case Builtin::BI__builtin_parity: 280 case Builtin::BI__builtin_parityl: 281 case Builtin::BI__builtin_parityll: { 282 // parity(x) -> ctpop(x) & 1 283 Value *ArgValue = EmitScalarExpr(E->getArg(0)); 284 285 const llvm::Type *ArgType = ArgValue->getType(); 286 Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); 287 288 const llvm::Type *ResultType = ConvertType(E->getType()); 289 Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp"); 290 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1), 291 "tmp"); 292 if (Result->getType() != ResultType) 293 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, 294 "cast"); 295 return RValue::get(Result); 296 } 297 case Builtin::BI__builtin_popcount: 298 case Builtin::BI__builtin_popcountl: 299 case Builtin::BI__builtin_popcountll: { 300 Value *ArgValue = EmitScalarExpr(E->getArg(0)); 301 302 const llvm::Type *ArgType = ArgValue->getType(); 303 Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); 304 305 const llvm::Type *ResultType = ConvertType(E->getType()); 306 Value *Result = Builder.CreateCall(F, ArgValue, "tmp"); 307 if (Result->getType() != ResultType) 308 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, 309 "cast"); 310 return RValue::get(Result); 311 } 312 case Builtin::BI__builtin_expect: { 313 // FIXME: pass expect through to LLVM 314 if (E->getArg(1)->HasSideEffects(getContext())) 315 (void)EmitScalarExpr(E->getArg(1)); 316 return RValue::get(EmitScalarExpr(E->getArg(0))); 317 } 318 case Builtin::BI__builtin_bswap32: 319 case Builtin::BI__builtin_bswap64: { 320 Value *ArgValue = EmitScalarExpr(E->getArg(0)); 321 const llvm::Type *ArgType = ArgValue->getType(); 322 Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1); 323 return RValue::get(Builder.CreateCall(F, ArgValue, "tmp")); 324 } 325 case Builtin::BI__builtin_object_size: { 326 // We pass this builtin onto the optimizer so that it can 327 // figure out the object size in more complex cases. 328 const llvm::Type *ResType[] = { 329 ConvertType(E->getType()) 330 }; 331 332 // LLVM only supports 0 and 2, make sure that we pass along that 333 // as a boolean. 
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined && HaveInsertPoint())
      EmitBranch(getTrapBB());
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
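    // (The FCmp instructions above produce an i1, but the builtins are
    // declared to return 'int'.)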
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()),
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
                                            Size, "tmp"));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
                  Address,
                  llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                  SizeVal,
                  llvm::ConstantInt::get(Int32Ty, 1),
                  llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
    return RValue::get(Address);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
                                        SizeVal->getType()),
                  Address, SrcAddr, SizeVal,
                  llvm::ConstantInt::get(Int32Ty, 1),
                  llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
    return RValue::get(Address);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
                                         SizeVal->getType()),
                  Address, SrcAddr, SizeVal,
                  llvm::ConstantInt::get(Int32Ty, 1),
                  llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
    return RValue::get(Address);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
                  Address,
                  Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                      llvm::Type::getInt8Ty(VMContext)),
                  SizeVal,
                  llvm::ConstantInt::get(Int32Ty, 1),
                  llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    const llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                ? Intrinsic::eh_return_i32
                                : Intrinsic::eh_return_i64,
                                0, 0);
    Builder.CreateCall2(F, Int, Ptr);
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    LLVMContext &C = CGM.getLLVMContext();

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));

    // Call LLVM's EH longjmp, which is lightweight.
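    // (@llvm.eh.sjlj.longjmp only reloads the frame and stack pointers
    // saved by @llvm.eh.sjlj.setjmp above and branches to the saved IP;
    // it does not restore a full register context the way the libc call
    // may.)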
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);

  // Clang extensions: not overloaded yet.
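  // (These exist only in this one type-generic form; there are no per-width
  // _1.._16 variants for Sema to expand them into, so the generic BuiltinID
  // reaches codegen directly.)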
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume, like gcc appears to, that this only applies to cached memory.
    EmitMemoryBarrier(*this, true, true, true, true, false);
    return RValue::get(0);
  }

  case Builtin::BI__builtin_llvm_memory_barrier: {
    Value *C[5] = {
      EmitScalarExpr(E->getArg(0)),
      EmitScalarExpr(E->getArg(1)),
      EmitScalarExpr(E->getArg(2)),
      EmitScalarExpr(E->getArg(3)),
      EmitScalarExpr(E->getArg(4))
    };
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // TODO: there is currently no set of optimizer flags
    // sufficient for us to rewrite sqrt to @llvm.sqrt.
    // -fmath-errno=0 is not good enough; we need finiteness.
    // We could probably precondition the call with an ult
    // against 0, but is that worth the complexity?
    break;
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
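    // (The ConstAttr check below is what gates this: only when the frontend
    // has marked pow as side-effect-free, e.g. errno handling does not
    // apply, may the libm call be replaced by @llvm.pow.)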
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break;  // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
  // that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(E->getCallee()->getType(),
                    CGM.getBuiltinLibFunction(FD, BuiltinID),
                    ReturnValueSlot(),
                    E->arg_begin(), E->arg_end());

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,
                                                           getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(VMContext, Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
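      // (This can happen when the builtin's C prototype and the intrinsic's
      // signature spell a same-sized value differently, e.g. as two
      // differently-shaped vector types; the asserts below check that the
      // cast loses nothing.)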
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateMemTemp(E->getType()));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
  switch (type) {
  default: break;
  case 0:
  case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
  case 6:
  case 7:
  case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
  case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
  case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
  case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
  }
  return 0;
}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  SmallVector<Constant*, 16> Indices(nElts, C);
  Value *SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name, bool splat,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  if (splat) {
    Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
    Ops.resize(j);
  }
  return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
                                            bool neg) {
  ConstantInt *CI = cast<ConstantInt>(V);
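  // NEON shift intrinsics take the shift amount as a vector with one lane
  // per element, and encode right shifts as negative amounts; build a splat
  // of the (possibly negated) immediate accordingly.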
  int SV = CI->getSExtValue();

  const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
  return llvm::ConstantVector::get(CV.begin(), CV.size());
}

/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
  unsigned Align = 1;
  // Check if the type is a pointer.  The implicit cast operand might not be.
  while (Addr->getType()->isPointerType()) {
    QualType PtTy = Addr->getType()->getPointeeType();
    unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
    if (NewA > Align)
      Align = NewA;

    // If the address is an implicit cast, repeat with the cast operand.
    if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
      Addr = CastAddr->getSubExpr();
      continue;
    }
    break;
  }
  return llvm::ConstantInt::get(CGF.Int32Ty, Align);
}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    const FunctionDecl *FD = E->getDirectCallee();
    Value *a = EmitScalarExpr(E->getArg(0));
    Value *b = EmitScalarExpr(E->getArg(1));
    const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    llvm::StringRef Name = FD->getName();
    return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name),
                               a, b);
  }

  llvm::SmallVector<Value*, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    const llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = llvm::Type::getFloatTy(VMContext);
    else
      Ty = llvm::Type::getDoubleTy(VMContext);

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  unsigned type = Result.getZExtValue();
  bool usgn = type & 0x08;
  bool quad = type & 0x10;
  bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
  (void)poly;  // Only used in assert()s.
  bool splat = false;
  bool rightShift = false;

  const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
  const llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
                        Ops, "vaddhn");
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcnt");
  }
  // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(VMContext, 4, quad);
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
    int CV = C->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1_lane_v:
  case ARM::BI__builtin_neon_vld1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
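    // vldN intrinsics return a struct of N vectors; store it through the
    // result pointer the builtin received as its first operand.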
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: assert(0 && "unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));

    Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
    // splat lane 0 to all elts in each vector of the result.
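    // (The vldNlane call above loaded lane 0 of each result vector from
    // memory; splatting that lane everywhere gives the vldN_dup semantics.)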
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: assert(0 && "unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));

    Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
    // Splat lane 0 to all elements in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
  case ARM::BI__builtin_neon_vmovl_v: {
    const llvm::Type *DTy =
      llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    const llvm::Type *QTy =
      llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
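  // Polynomial multiplication is carry-less, not ordinary integer
  // multiplication, so vmul/vmull on polynomial element types must go
  // through the dedicated vmulp/vmullp intrinsics instead of a mul
  // instruction.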
  case ARM::BI__builtin_neon_vmul_v:
    assert(poly && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    assert(poly && "vmull builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
                        Ops, "vmull");
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v:
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v:
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
  case ARM::BI__builtin_neon_vqdmlal_lane_v:
    splat = true;
    // fall through
  case ARM::BI__builtin_neon_vqdmlal_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
                        Ops, "vqdmlal", splat);
  case ARM::BI__builtin_neon_vqdmlsl_lane_v:
    splat = true;
    // fall through
  case ARM::BI__builtin_neon_vqdmlsl_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
                        Ops, "vqdmlsl", splat);
  case ARM::BI__builtin_neon_vqdmulh_lane_v:
  case ARM::BI__builtin_neon_vqdmulhq_lane_v:
    splat = true;
    // fall through
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
                        Ops, "vqdmulh", splat);
  case ARM::BI__builtin_neon_vqdmull_lane_v:
    splat = true;
    // fall through
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
                        Ops, "vqdmull", splat);
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_lane_v:
  case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
    splat = true;
    // fall through
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
                        Ops, "vqrdmulh", splat);
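  // For the shift builtins below, the trailing EmitNeonCall arguments appear
  // to describe the constant shift operand: the "1" marks which operand is
  // the shift amount and the final bool presumably requests negation so that
  // the right-shift-by-immediate forms can reuse the shift intrinsics (see
  // EmitNeonCall's definition earlier in this file).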
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
                        Ops, "vqrshrun_n", false, 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
                        Ops, "vqshlu", false, 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
                        Ops, "vqshrun_n", false, 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
                        Ops, "vrshrn_n", false, 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
                        1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
                        Ops, "vrsqrts");
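  // vrsra_n has no intrinsic of its own: it is emitted as a rounding shift
  // right (vrshift with what appears to be a negated shift vector) followed
  // by an ordinary vector add into the accumulator operand in Ops[0].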
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
                        Ops, "vshrn_n", false, 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    if (usgn)
      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
    return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
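  // vsri_n and vsli_n share the single vshiftins intrinsic; the only
  // difference is the direction of the constant shift, which is encoded in
  // the shift vector built by EmitNeonShiftVector (negated for the right
  // shift, judging by the rightShift flag below).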
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    // fall through
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
    if (usgn)
      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
    else
      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1_lane_v:
  case ARM::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
                        Ops, "vsubhn");
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
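  // vtrn/vuzp/vzip each produce two result vectors. They are emitted as a
  // pair of shufflevectors, with each half stored through consecutive slots
  // of the result pointer in Ops[0].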
  case ARM::BI__builtin_neon_vtrn_v:
  case ARM::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
        Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vuzp_v:
  case ARM::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vzip_v:
  case ARM::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
}

llvm::Value *CodeGenFunction::
BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    std::vector<llvm::Constant*> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i],
               llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), i));

  return Result;
}
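
// Many x86 builtins require some arguments to be compile-time constants
// (e.g. immediate fields in the instruction encoding). GetBuiltinType
// reports these via the ICEArguments bitmask, and the argument loop below
// constant-folds them so the generated intrinsic always sees a ConstantInt.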
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(VMContext, Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
    // The immediate-count forms are emitted via the corresponding
    // vector-count SSE2 intrinsics: widen the count to i64, place it in
    // element 0 of a v2i64, and bitcast to the operand's vector type.
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
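  // __builtin_ia32_vec_init_* builds an MMX value from scalar pieces:
  // BuildVector emits the insertelement sequence (or a ConstantVector) and
  // the result is then reinterpreted as the opaque x86_mmx type.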
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(VMContext));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
    // Same trick as the 128-bit shifts above, except the MMX intrinsics take
    // the count as a v1i64.
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // Cast the value to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
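  // palignr concatenates the two input vectors, shifts the pair right by the
  // byte count in Ops[2], and returns the low half. For example, with a
  // shift of 4 the shuffle below picks bytes 4..11 of the concatenation
  // (Ops[1] supplying the low bytes and Ops[0] the high bytes).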
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 8) {
      llvm::SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount for psrl.q is given in bits.
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      llvm::SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount for psrl.dq is an i32 given in bits.
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  }
}
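
// The AltiVec load/store builtins take an (offset, pointer) pair; the common
// code below folds the offset into the address with a GEP on an i8* and then
// calls the corresponding intrinsic on the adjusted pointer.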
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
    Ops.pop_back();

    switch (BuiltinID) {
    default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
  }

  // vec_st
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
    Ops.pop_back();

    switch (BuiltinID) {
    default: assert(0 && "Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
  }
  }
  return 0;
}