CGCall.cpp revision 046c294a43024874ff35656c6e785b64e72f1f36
1//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// These classes wrap the information about a call or function 11// definition used to handle ABI compliancy. 12// 13//===----------------------------------------------------------------------===// 14 15#include "CGCall.h" 16#include "CodeGenFunction.h" 17#include "CodeGenModule.h" 18#include "clang/Basic/TargetInfo.h" 19#include "clang/AST/Decl.h" 20#include "clang/AST/DeclCXX.h" 21#include "clang/AST/DeclObjC.h" 22#include "clang/CodeGen/CodeGenOptions.h" 23#include "llvm/Attributes.h" 24#include "llvm/Support/CallSite.h" 25#include "llvm/Target/TargetData.h" 26 27#include "ABIInfo.h" 28 29using namespace clang; 30using namespace CodeGen; 31 32/***/ 33 34// FIXME: Use iterator and sidestep silly type array creation. 35 36static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { 37 switch (CC) { 38 default: return llvm::CallingConv::C; 39 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 40 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 41 } 42} 43 44/// Derives the 'this' type for codegen purposes, i.e. ignoring method 45/// qualification. 46/// FIXME: address space qualification? 47static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) { 48 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 49 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 50} 51 52/// Returns the canonical formal type of the given C++ method. 
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the function info for an unprototyped (K&R-style) function
/// type: no formal parameters, just the unqualified result type.
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo());
}

/// Appends the formal parameters of \p FTP to \p ArgTys and looks up the
/// function info for the resulting signature.
///
/// \param ArgTys - contains any initial parameters besides those
/// in the formal type (e.g. an implicit 'this' or a VTT parameter).
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys,
                             FTP->getExtInfo());
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP);
}

/// Maps calling-convention attributes on \p D to the corresponding
/// clang CallingConv; defaults to the C convention.
static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  // Instance methods get the 'this'-augmented signature.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  // The two implicit Objective-C parameters: self and the selector (_cmd).
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Constructors and destructors need the variant (complete/base) from GD.
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

/// The master overload that all the others funnel into: looks up (or
/// creates and caches) the unique CGFunctionInfo for a canonical
/// signature, running target ABI classification on newly created entries.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  // All inputs must already be canonicalized as parameter types.
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const llvm::SmallVectorImpl<CanQualType> &ArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = ArgTys.size();
  // Slot 0 holds the return type's info; the parameter infos follow it.
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

/// Recursively flattens the scalar fields of the structure type \p Ty
/// into \p ArgTys, for arguments classified as ABIArgInfo::Expand.
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate fields are flattened recursively.
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

/// Reassembles a struct that was passed as a sequence of expanded scalar
/// arguments: stores each incoming argument into the matching field of
/// \p LV and returns the iterator past the last argument consumed.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator
         i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

/// The inverse of ExpandTypeFromArgs: flattens the aggregate rvalue
/// \p RV into a sequence of scalar call arguments appended to \p Args.
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
350static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, 351 const llvm::Type *Ty, 352 CodeGenFunction &CGF) { 353 const llvm::Type *SrcTy = 354 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 355 uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); 356 uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty); 357 358 // If load is legal, just bitcast the src pointer. 359 if (SrcSize >= DstSize) { 360 // Generally SrcSize is never greater than DstSize, since this means we are 361 // losing bits. However, this can happen in cases where the structure has 362 // additional padding, for example due to a user specified alignment. 363 // 364 // FIXME: Assert that we aren't truncating non-padding bits when have access 365 // to that information. 366 llvm::Value *Casted = 367 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 368 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 369 // FIXME: Use better alignment / avoid requiring aligned load. 370 Load->setAlignment(1); 371 return Load; 372 } else { 373 // Otherwise do coercion through memory. This is stupid, but 374 // simple. 375 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 376 llvm::Value *Casted = 377 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); 378 llvm::StoreInst *Store = 379 CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); 380 // FIXME: Use better alignment / avoid requiring aligned store. 381 Store->setAlignment(1); 382 return CGF.Builder.CreateLoad(Tmp); 383 } 384} 385 386/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 387/// where the source and destination may have different types. 388/// 389/// This safely handles the case when the src type is larger than the 390/// destination type; the upper bits of the src will be lost. 
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

/// Returns whether this signature's return value is lowered as a hidden
/// struct-return (sret) pointer argument.
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}

/// Builds the LLVM function type corresponding to the ABI-lowered
/// signature \p FI.
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    // The value is returned through a hidden pointer argument; the LLVM
    // function itself returns void.
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      // The aggregate is split into one LLVM argument per scalar field.
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

/// Returns true if the return type or any parameter type of \p T is a
/// tag type that has been declared but not yet defined.
static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
  if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
    if (!TT->getDecl()->isDefinition())
      return true;
  }

  for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
    if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
      if (!TT->getDecl()->isDefinition())
        return true;
    }
  }

  return false;
}

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  // If any type in the signature is still incomplete, the real function
  // type cannot be computed yet; hand back an opaque type instead.
  if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());

  return llvm::OpaqueType::get(getLLVMContext());
}

/// Builds the LLVM attribute list (function, return and parameter
/// attributes) for a function with signature \p FI, and reports the LLVM
/// calling convention to use, honoring attributes on \p TargetDecl when
/// one is provided.
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  // Attribute index 0 is the return value; LLVM parameters start at 1.
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    // The sret pointer occupies the first parameter slot.
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are handled there.

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      // Consume regparm budget in pointer-sized words; the argument is
      // marked inreg while the budget has not gone negative.
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      // One attribute slot per expanded scalar argument.
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// Emits the function prologue: names the LLVM arguments and binds each
/// source-level parameter declaration to a value or memory slot, undoing
/// the ABI lowering described by \p FI.
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");
      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

/// Emits the function epilogue: moves the value in the return temp
/// \p ReturnValue into its ABI-mandated location and emits the ret.
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      // Copy into the sret slot, which is the function's first argument.
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        // Do nothing; aggregrates get evaluated directly into the destination.
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

/// Emits a single call argument: reference parameters bind to the
/// expression, everything else is evaluated (into a temporary if needed).
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E);

  return EmitAnyExprToTemp(E);
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
885 if (CGM.ReturnTypeUsesSret(CallInfo)) { 886 llvm::Value *Value = ReturnValue.getValue(); 887 if (!Value) 888 Value = CreateMemTemp(RetTy); 889 Args.push_back(Value); 890 } 891 892 assert(CallInfo.arg_size() == CallArgs.size() && 893 "Mismatch between function signature & arguments."); 894 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 895 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 896 I != E; ++I, ++info_it) { 897 const ABIArgInfo &ArgInfo = info_it->info; 898 RValue RV = I->first; 899 900 switch (ArgInfo.getKind()) { 901 case ABIArgInfo::Indirect: 902 if (RV.isScalar() || RV.isComplex()) { 903 // Make a temporary alloca to pass the argument. 904 Args.push_back(CreateMemTemp(I->second)); 905 if (RV.isScalar()) 906 EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second); 907 else 908 StoreComplexToAddr(RV.getComplexVal(), Args.back(), false); 909 } else { 910 Args.push_back(RV.getAggregateAddr()); 911 } 912 break; 913 914 case ABIArgInfo::Extend: 915 case ABIArgInfo::Direct: 916 if (RV.isScalar()) { 917 Args.push_back(RV.getScalarVal()); 918 } else if (RV.isComplex()) { 919 llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second)); 920 Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0); 921 Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1); 922 Args.push_back(Tmp); 923 } else { 924 Args.push_back(Builder.CreateLoad(RV.getAggregateAddr())); 925 } 926 break; 927 928 case ABIArgInfo::Ignore: 929 break; 930 931 case ABIArgInfo::Coerce: { 932 // FIXME: Avoid the conversion through memory if possible. 
933 llvm::Value *SrcPtr; 934 if (RV.isScalar()) { 935 SrcPtr = CreateMemTemp(I->second, "coerce"); 936 EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second); 937 } else if (RV.isComplex()) { 938 SrcPtr = CreateMemTemp(I->second, "coerce"); 939 StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false); 940 } else 941 SrcPtr = RV.getAggregateAddr(); 942 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), 943 *this)); 944 break; 945 } 946 947 case ABIArgInfo::Expand: 948 ExpandTypeToArgs(I->second, RV, Args); 949 break; 950 } 951 } 952 953 // If the callee is a bitcast of a function to a varargs pointer to function 954 // type, check to see if we can remove the bitcast. This handles some cases 955 // with unprototyped functions. 956 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee)) 957 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) { 958 const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType()); 959 const llvm::FunctionType *CurFT = 960 cast<llvm::FunctionType>(CurPT->getElementType()); 961 const llvm::FunctionType *ActualFT = CalleeF->getFunctionType(); 962 963 if (CE->getOpcode() == llvm::Instruction::BitCast && 964 ActualFT->getReturnType() == CurFT->getReturnType() && 965 ActualFT->getNumParams() == CurFT->getNumParams() && 966 ActualFT->getNumParams() == Args.size()) { 967 bool ArgsMatch = true; 968 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i) 969 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) { 970 ArgsMatch = false; 971 break; 972 } 973 974 // Strip the cast if we can get away with it. This is a nice cleanup, 975 // but also allows us to inline the function at -O0 if it is marked 976 // always_inline. 
977 if (ArgsMatch) 978 Callee = CalleeF; 979 } 980 } 981 982 983 llvm::BasicBlock *InvokeDest = getInvokeDest(); 984 unsigned CallingConv; 985 CodeGen::AttributeListType AttributeList; 986 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv); 987 llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(), 988 AttributeList.end()); 989 990 llvm::CallSite CS; 991 if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) { 992 CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size()); 993 } else { 994 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 995 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, 996 Args.data(), Args.data()+Args.size()); 997 EmitBlock(Cont); 998 } 999 1000 CS.setAttributes(Attrs); 1001 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 1002 1003 // If the call doesn't return, finish the basic block and clear the 1004 // insertion point; this allows the rest of IRgen to discard 1005 // unreachable code. 1006 if (CS.doesNotReturn()) { 1007 Builder.CreateUnreachable(); 1008 Builder.ClearInsertionPoint(); 1009 1010 // FIXME: For now, emit a dummy basic block because expr emitters in 1011 // generally are not ready to handle emitting expressions at unreachable 1012 // points. 1013 EnsureInsertPoint(); 1014 1015 // Return a reasonable RValue. 
1016 return GetUndefRValue(RetTy); 1017 } 1018 1019 llvm::Instruction *CI = CS.getInstruction(); 1020 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy()) 1021 CI->setName("call"); 1022 1023 switch (RetAI.getKind()) { 1024 case ABIArgInfo::Indirect: 1025 if (RetTy->isAnyComplexType()) 1026 return RValue::getComplex(LoadComplexFromAddr(Args[0], false)); 1027 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) 1028 return RValue::getAggregate(Args[0]); 1029 return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy)); 1030 1031 case ABIArgInfo::Extend: 1032 case ABIArgInfo::Direct: 1033 if (RetTy->isAnyComplexType()) { 1034 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 1035 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 1036 return RValue::getComplex(std::make_pair(Real, Imag)); 1037 } 1038 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 1039 llvm::Value *DestPtr = ReturnValue.getValue(); 1040 bool DestIsVolatile = ReturnValue.isVolatile(); 1041 1042 if (!DestPtr) { 1043 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 1044 DestIsVolatile = false; 1045 } 1046 Builder.CreateStore(CI, DestPtr, DestIsVolatile); 1047 return RValue::getAggregate(DestPtr); 1048 } 1049 return RValue::get(CI); 1050 1051 case ABIArgInfo::Ignore: 1052 // If we are ignoring an argument that had a result, make sure to 1053 // construct the appropriate return value for our caller. 
1054 return GetUndefRValue(RetTy); 1055 1056 case ABIArgInfo::Coerce: { 1057 llvm::Value *DestPtr = ReturnValue.getValue(); 1058 bool DestIsVolatile = ReturnValue.isVolatile(); 1059 1060 if (!DestPtr) { 1061 DestPtr = CreateMemTemp(RetTy, "coerce"); 1062 DestIsVolatile = false; 1063 } 1064 1065 CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this); 1066 if (RetTy->isAnyComplexType()) 1067 return RValue::getComplex(LoadComplexFromAddr(DestPtr, false)); 1068 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) 1069 return RValue::getAggregate(DestPtr); 1070 return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy)); 1071 } 1072 1073 case ABIArgInfo::Expand: 1074 assert(0 && "Invalid ABI kind for return argument"); 1075 } 1076 1077 assert(0 && "Unhandled ABIArgInfo::Kind"); 1078 return RValue::get(0); 1079} 1080 1081/* VarArg handling */ 1082 1083llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { 1084 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); 1085} 1086