CGCall.cpp revision 263366f9241366f29ba65b703120f302490c39ff
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}
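// Example (illustrative sketch, not taken from the source): given the
// K&R-style code
//
//   int f();
//   int g(void) { return f(1, 2.0); }
//
// the type of 'f' is a FunctionNoProtoType, so the call is arranged with
// RequiredArgs(0), i.e. every argument is treated as optional, much like a
// variadic call.  Whether such a call is really lowered as variadic is
// target-specific; see isNoProtoCallVariadic in arrangeFreeFunctionCall
// below.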
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP,
                                      FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}
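// Example (illustrative sketch, names invented): for the declaration
//
//   double hypot3(double x, double y, double z);
//
// arrangeFunctionDeclaration ends up calling arrangeLLVMFunctionInfo with
// resultType = double, argTypes = {double, double, double},
// RequiredArgs::All, and the default C convention; on typical targets the
// resulting CGFunctionInfo lowers to the LLVM type
// double (double, double, double).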
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFreeFunctionCall(fnType->getResultType(), args,
                                 fnType->getExtInfo(), required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
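// Layout note (illustrative, inferred from create() above): the
// CGFunctionInfo and its ArgInfo array share one heap allocation,
//
//   [ CGFunctionInfo | ArgInfo 0 (result) | ArgInfo 1..N (arguments) ]
//
// which is why the buffer is sized as sizeof(CGFunctionInfo) plus
// (argTypes.size() + 1) ArgInfos, and getArgsBuffer()[0] holds the result
// type rather than the first argument.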

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
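// Example (illustrative sketch, types invented): expanding
//
//   struct Pt  { float x; float y; };
//   struct Box { struct Pt min; struct Pt max; };
//
// yields the flat list {float, float, float, float}, and a _Complex double
// contributes {double, double}; a union degrades to its single largest
// field, as described above.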
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
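// Example (illustrative sketch of the IR the helper above emits): coercing
// an i8* value to i32 on a target where IntPtrTy is i64 produces
//
//   %coerce.val.pi = ptrtoint i8* %val to i64
//   %coerce.val.ii = trunc i64 %coerce.val.pi to i32
//
// while a pointer-to-pointer coercion stays a single bitcast.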
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits not present in
/// the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}
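// Example (illustrative sketch, value names invented): storing a
// first-class aggregate of type {i32, float} through BuildAggStore emits
//
//   %elt0 = extractvalue {i32, float} %val, 0
//   %dst0 = getelementptr {i32, float}* %dst, i32 0, i32 0
//   store i32 %elt0, i32* %dst0
//
// and likewise for element 1, instead of one aggregate store.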
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}
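// Example (illustrative sketch, names invented): for
//
//   struct Big { int a[8]; };
//   struct Big make(void);
//
// a target that returns Big indirectly yields the LLVM signature
//
//   void @make(%struct.Big* sret %agg.result)
//
// i.e. the return slot becomes a leading pointer parameter (handled by the
// ABIArgInfo::Indirect case below) and the LLVM result type becomes void.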
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding type to ensure proper alignment.
      if (llvm::Type *PaddingType = argAI.getPaddingType())
        argTypes.push_back(PaddingType);
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attributes::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attributes::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attributes::InReg);
    PAL.push_back(llvm::
                  AttributeWithIndex::get(Index,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                SRETAttrs)));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
      .removeAttribute(llvm::Attributes::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                RetAttrs)));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      // FIXME: handle sseregparm someday...

      // Increment Index if there is padding.
      Index += (AI.getPaddingType() != 0);

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
                                        llvm::Attributes::get(getLLVMContext(),
                                                              Attrs)));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attributes::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
        .removeAttribute(llvm::Attributes::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                        llvm::Attributes::get(getLLVMContext(),
                                                              Attrs)));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                FuncAttrs)));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                      llvm::Attributes::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Skip the dummy padding argument.
      if (ArgI.getPaddingType())
        ++AI;

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                            llvm::Attributes::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }


      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be at the end of the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
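// Example (illustrative sketch): in ARC, a method like
//
//   - (id)currentItem { return _item; }
//
// conceptually retains the result and autoreleases it on return.  The
// peepholes above either cancel a retain of an immutable 'self' outright
// (tryRemoveRetainOfSelf) or fuse the retain/autorelease pair into a single
// objc_retainAutoreleaseReturnValue call (tryEmitFusedAutoreleaseOfResult).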
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, so just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}
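// For a simple direct return, this typically turns (illustrative IR)
//
//   store i32 %sum, i32* %retval
//   %0 = load i32* %retval
//   ret i32 %0
//
// into just 'ret i32 %sum', with the store and (usually) the %retval alloca
// deleted as well via the findDominatingStoreToReturnValue path above.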
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  // An alloca is never null.
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null-check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                     "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}
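// When the source address is not provably non-null, the writeback emitted
// above is guarded, roughly like this (illustrative IR; types and value
// names are invented, block names match the ones created above):
//
//   %isnull = icmp eq i8** %addr, null            ; icr.isnull
//   br i1 %isnull, label %icr.done, label %icr.writeback
//  icr.writeback:
//   %val = load i8** %icr.temp
//   store i8* %val, i8** %addr
//   br label %icr.done
//  icr.done: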
/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary; after the call, the value
/// left in the temporary is copied back into the original l-value.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
                        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}
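// The canonical source pattern for this path (illustrative, assuming ARC) is
// passing the address of a __strong local to an out-parameter that is
// implicitly __autoreleasing, e.g.:
//
//   NSError *err = nil;
//   BOOL ok = [data writeToFile:path options:0 error:&err];
//
// Sema wraps '&err' in an ObjCIndirectCopyRestoreExpr; we pass the address
// of the 'icr.temp' temporary instead and, via the writeback recorded here,
// copy the callee's result back into 'err' after the call.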
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}
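// The resulting annotation looks like this (a sketch; names invented):
//
//   invoke void @f() to label %invoke.cont unwind label %lpad,
//       !clang.arc.no_objc_arc_exceptions !0
//
// which tells the ARC optimizer it may pair retains and releases across the
// unwind edge as if the call could not throw.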
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      // A union is expanded as just its single largest field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
                                      e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
                                      e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
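// For example, on a target whose ABI expands this struct (a sketch; the
// names are invented):
//
//   struct Point { float x; float y; };
//   void f(struct Point p);
//
// is lowered as 'void @f(float, float)', with the two fields pushed as
// separate scalar arguments by the recursion above rather than as one
// aggregate value.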
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }
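    // e.g. the byval path above typically lowers a large struct argument to
    // something like (illustrative):
    //
    //   %tmp = alloca %struct.Big, align 8
    //   ; ...the aligned copy, when one is required...
    //   call void @f(%struct.Big* byval align 8 %tmp)
    //
    // with the extra temporary and copy emitted only when the source must
    // be preserved or is insufficiently aligned.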
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }
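    // For instance, on x86-64 a struct of two doubles is commonly coerced
    // to the FCA '{ double, double }', so the loop above emits (sketch)
    //
    //   call void @f(double %0, double %1)
    //
    // instead of passing a single aggregate value.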
    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer-to-function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // Even if we are ignoring the result, make sure to construct an
    // appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the result doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
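// For example, 'va_arg(ap, int)' funnels through here and is lowered by the
// target's ABIInfo; on many targets that is little more than a pointer bump
// (a sketch, not any particular target's exact sequence):
//
//   %cur = load i8** %ap
//   %next = getelementptr i8* %cur, i64 8
//   store i8* %next, i8** %ap
//   %addr = bitcast i8* %cur to i32*
//   %val = load i32* %addr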