CGCall.cpp revision b5a03b6217cd6c3eecdc1aa4df03060cf51ac942
//===---- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param ArgTys - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  // FIXME: Kill copy.
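  // Illustrative note (not in the original source): for a prototype like
  // 'int f(int, float)', the loop below simply appends {int, float} to
  // ArgTys after any prefix parameters the caller already pushed (such as
  // a 'this' pointer), and ResTy becomes the unqualified 'int'.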
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
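  // Illustrative note: for 'struct S { S(int); };' this leaves ArgTys as
  // roughly { S*, int }: the 'this' pointer pushed above, any implicit
  // parameters added by BuildConstructorSignature (a VTT on some ABIs),
  // then the declared parameters copied below.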
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*HasRegParm*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
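  // Illustrative note: getCanonicalParamType also performs the usual
  // parameter adjustments, so a parameter declared 'int arr[4]' is
  // recorded here as 'int *'.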
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  llvm::SmallVector<CanQualType, 1> args;
  return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(),
                          Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertTypeRecursive(I->type));

  // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
  // types, resolve them now.  These pointers may point to this function, which
  // we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, bool _HasRegParm,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
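  // Layout note (illustrative): Args[0] holds the return type and its
  // ABIArgInfo, while Args[1..NumArgTys] hold the parameters, which is why
  // the argument iterators on this class skip the first slot.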
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys,
                                    bool IsRecursive) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT))
      GetExpandedTypes(FT, ArgTys, IsRecursive);
    else
      ArgTys.push_back(ConvertType(FT, IsRecursive));
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
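  // Illustrative example: given SrcSTy = { { i32, i32 }, i8 } and
  // DstSize = 8, the first element { i32, i32 } is 8 bytes, so we GEP into
  // it; its own first element (i32, 4 bytes) is smaller than DstSize, so
  // the recursion stops at the inner { i32, i32 }.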
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
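  // Illustrative example: loading an 'i8*' slot coerced to 'i64' on a
  // 64-bit target becomes a load of the pointer followed by a ptrtoint in
  // CoerceIntOrPtrToIntOrPtr above.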
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
                              bool IsRecursive) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = RetAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
    unsigned AS = Context.getTargetAddressSpace(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, AS));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *ArgTy = AI.getCoerceToType();
      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          ArgTys.push_back(STy->getElementType(i));
      } else {
        ArgTys.push_back(ArgTy);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys, IsRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT)) {
    const CGFunctionInfo *Info;
    if (isa<CXXDestructorDecl>(MD))
      Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
    else
      Info = &getFunctionInfo(MD);
    return GetFunctionType(*Info, FPT->isVariadic(), false);
  }

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
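  // Illustrative note: with -mregparm=3 on i386 (PointerWidth 32), a
  // 'long long' parameter consumes ceil(64/32) = 2 of the 3 register slots
  // in the accounting below, and only gets 'inreg' if the budget has not
  // gone negative.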
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys, false);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  const llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
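  // Illustrative note: hasImplicitReturnZero() is how C++ (and C99) 'main'
  // without an explicit return ends up returning 0; the store below seeds
  // the return slot before the body runs.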
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
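      // Illustrative example: an argument of a 12-byte struct coerced to
      // { i64, i32 } uses the ABI alignment of { i64, i32 } here (8 on most
      // 64-bit targets), raised to the declared alignment of the parameter
      // if the user asked for more.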
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
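  // Note (illustrative): ReturnValue is set up by StartFunction; it is null
  // here when the function's return type is void (or was ignored by the
  // ABI), so there is nothing to load.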
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap the
      // alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
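  // (Illustrative restatement: for 'const Foo &' where Foo is an aggregate,
  // the local already points at the aggregate and is passed through as-is;
  // for 'const int &', the alloca holds the int* and must be loaded first.)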
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (type->isReferenceType())
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);

  if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
             type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::Value * const *ArgBegin,
                                  llvm::Value * const *ArgEnd,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  ArgBegin, ArgEnd, Name);
  EmitBlock(ContBB);
  return Invoke;
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
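  // Illustrative example: for a call like 'S s = f();' where S is returned
  // indirectly, ReturnValue typically already names the destination slot,
  // so no extra temporary is created below.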
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned Alignment =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->Ty));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            Alignment, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
        Args.push_back(CreateMemTemp(I->Ty));
        EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
                          RV.isVolatileQualified());
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        if (RV.isScalar())
          Args.push_back(RV.getScalarVal());
        else
          Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
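        // Illustrative example: a coerce-to type of i64 for an 8-byte
        // struct is loaded from SrcPtr as a single i64 here via
        // CreateCoercedLoad.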
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
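    // Illustrative note: the caller still expects an RValue of type RetTy
    // even though a noreturn callee (e.g. abort) never produces one, so an
    // undef value of the right shape is returned.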
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        Builder.CreateStore(CI, DestPtr, DestIsVolatile);
        return RValue::getAggregate(DestPtr);
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}