CGCall.cpp revision 800588fd230d2c37ddce8fbf4a3881352715d700
//===----- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

/// Map a Clang calling convention to the corresponding LLVM calling
/// convention. Conventions without a direct LLVM equivalent lower to the
/// default C convention.
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param ArgTys - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

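/// getCallingConventionForDecl - Return the calling convention implied by the
/// given declaration's calling-convention attributes, defaulting to the C
/// calling convention.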
static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

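/// getFunctionInfo - Return the function info for the function referenced by
/// the given GlobalDecl; constructors and destructors are dispatched to the
/// overloads above so that the ctor/dtor variant being emitted is respected.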
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                            const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertTypeRecursive(I->type));

  // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
  // types, resolve them now.  These pointers may point to this function, which
  // we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

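/// CGFunctionInfo - Construct a function info object, recording the calling
/// convention and the unlowered return and argument types. The per-argument
/// ABIArgInfo values are filled in afterwards by the ABIInfo::computeInfo
/// call in CodeGenTypes::getFunctionInfo above.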
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
  // Args[0] holds the return type; the argument types follow it.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys,
                                    bool IsRecursive) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT))
      GetExpandedTypes(FT, ArgTys, IsRecursive);
    else
      ArgTys.push_back(ConvertType(FT, IsRecursive));
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

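// Illustration (hypothetical example, not from the original source): with the
// Expand ABI kind, an argument declared as
//   struct Point { int x; int y; };
// is passed as two separate i32 arguments. GetExpandedTypes produces the
// flattened type list, and the two helpers above reconstruct the struct from
// the arguments in the callee (ExpandTypeFromArgs) or decompose it into
// scalar call operands in the caller (ExpandTypeToArgs).
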
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified Ty, where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

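// For example (hypothetical IR): coercing an i8* value to i64 on a 64-bit
// target emits
//   %coerce.val.pi = ptrtoint i8* %val to i64
// and the i64 -> i8* direction uses inttoptr ("coerce.val.ip"), while
// pointer-to-pointer coercions are a plain bitcast ("coerce.val").
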
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

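// The memory fallback path above, for a source type smaller than the
// destination, amounts to roughly this (hypothetical IR, in the typed-pointer
// syntax of this era, for SrcTy = {i8, i8} and Ty = i32):
//   %tmp = alloca i32
//   %casted = bitcast i32* %tmp to {i8, i8}*
//   %v = load {i8, i8}* %srcptr
//   store {i8, i8} %v, {i8, i8}* %casted
//   %res = load i32* %tmp
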
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

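/// GetFunctionType - Get the LLVM function type for the function referenced
/// by the given GlobalDecl. Note that a K&R-style definition (e.g.
/// "int f(x) int x; { ... }") has no prototype; for definition purposes it
/// is deliberately emitted as non-variadic.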
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
                              bool IsRecursive) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = RetAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *ArgTy = AI.getCoerceToType();
      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          ArgTys.push_back(STy->getElementType(i));
      } else {
        ArgTys.push_back(ArgTy);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys, IsRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);

  return llvm::OpaqueType::get(getLLVMContext());
}

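/// ConstructAttributeList - Build the set of LLVM attributes (function-level,
/// return-value, and per-parameter) for a function with the given
/// CGFunctionInfo, reporting the effective calling convention through
/// CallingConv. TargetDecl, when present, contributes declaration attributes
/// such as noreturn, const, pure, and malloc.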
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->hasEmptyExceptionSpec())
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;

  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

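  // Note on attribute indices: index 0 refers to the return value, parameter
  // attributes start at index 1 (the running Index above starts at 1 and is
  // bumped past the sret parameter when one is present), and ~0, used at the
  // end of this function, holds the function-level attributes.
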
  // FIXME: we need to honor command line settings also.
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because the parameter mapping is too
    // convoluted at this point.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements() - 1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys, false);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

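/// EmitFunctionProlog - Emit the function prolog: walk the ABI-lowered LLVM
/// arguments in parallel with the semantic parameter list and rebuild each
/// parameter variable, undoing whatever coercion, expansion, or indirection
/// the ABI applied.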
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty)) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
        EmitParmDecl(*Arg, V);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                    (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        llvm::Value *Ptr = V;
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

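/// EmitFunctionEpilog - Emit the function epilog: load the return value from
/// the ReturnValue slot (coercing it if the ABI requires a different IR type)
/// and emit the ret instruction.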
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, RetTy);
    }
    break;

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
      // The internal return value temp will always have
      // pointer-to-return-type type, so we can just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap
      // the alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
    }
    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

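// For instance (hypothetical IR), the common epilog pattern
//   store i32 %x, i32* %retval
//   %0 = load i32* %retval
//   ret i32 %0
// is folded by the elision above into a plain "ret i32 %x", and the retval
// alloca itself is deleted once nothing else refers to it.
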
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RefType->getPointeeType()))
      return RValue::getAggregate(Local);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Local));
  }

  if (ArgType->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));

  if (hasAggregateLLVMType(ArgType))
    return RValue::getAggregate(Local);

  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);

  return EmitAnyExprToTemp(E);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::Value * const *ArgBegin,
                                  llvm::Value * const *ArgEnd,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  ArgBegin, ArgEnd, Name);
  EmitBlock(ContBB);
  return Invoke;
}

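/// EmitCall - Generate a call to the given function value, lowering the
/// arguments and interpreting the result according to the ABI information in
/// CallInfo; ReturnValue, if non-null, names the slot the caller wants the
/// result stored into.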
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type)) {
        if (RV.isScalar())
          Args.push_back(RV.getScalarVal());
        else
          Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

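  // To illustrate the cleanup below (hypothetical C): given a declaration
  // "void f();" and a definition "void f(int x) {}", a call "f(42)" is
  // emitted through a bitcast of @f to the unprototyped function type; when
  // the lowered return and argument types line up exactly, the bitcast is
  // stripped and @f is called directly.
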
  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data() + Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data() + Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

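  // Translate the ABI-lowered result back into an RValue of the semantic
  // return type. For an indirect (sret) return, Args[0] is the hidden result
  // pointer that was passed to the callee above.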
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        Builder.CreateStore(CI, DestPtr, DestIsVolatile);
        return RValue::getAggregate(DestPtr);
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}