CGCall.cpp revision 50e6b18f99c45b31e6216ab221f6b3911b24fa1f
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored. Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &
arrangeFreeFunctionType(CodeGenTypes &CGT,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ instance
/// method of the given type, on top of any implicit parameters
/// already stored.
static const CGFunctionInfo &
arrangeCXXMethodType(CodeGenTypes &CGT,
                     SmallVectorImpl<CanQualType> &prefix,
                     CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant. It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(FTP->isVariadic() == 0 && "variadic dtor");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args,
                                      cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFreeFunctionCall(fnType->getResultType(), args,
                                 fnType->getExtInfo(), required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
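  // Illustrative note (an assumption, not from the original source): the
  // "coerce-to" type is the LLVM type a Direct/Extend value is actually
  // passed or returned as. For example, the x86-64 SysV ABI may mark
  //   struct S { float x, y; };
  // as Direct with a coerce-to type of <2 x float> so it travels in one SSE
  // register. When the target ABI leaves the field null, the default filled
  // in below is simply the converted Clang type.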
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                             SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
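      // Illustrative example (an assumption, not from the original source):
      //   union U { int a; unsigned b; };
      // flattens to the same i32 whichever field is chosen, so expanding
      // only the single "largest" field loses nothing.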
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
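// Illustrative sketch (an assumption, not from the original source): given an
// alloca of %struct.S = type { i32, i32 } and a coerce-to type of i64, the
// bitcast fast path below emits roughly
//   %0 = bitcast %struct.S* %s to i64*
//   %1 = load i64* %0, align 1
// rather than loading the two fields separately and repacking them.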
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
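// Illustrative sketch (an assumption, not from the original source): this is
// the mirror image of CreateCoercedLoad above. Storing a coerced i64 argument
// back into an alloca of type { i32, i32 } emits roughly
//   %0 = bitcast %struct.S* %s to i64*
//   store i64 %arg, i64* %0, align 1
// with BuildAggStore used instead when the source value is itself a
// first-class aggregate.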
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding type to ensure proper alignment.
      if (llvm::Type *PaddingType = argAI.getPaddingType())
        argTypes.push_back(PaddingType);
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
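      // Illustrative example (an assumption, not from the original source):
      // a Direct argument with coerce-to type { i64, i64 } contributes two
      // i64 parameters to the LLVM signature rather than one FCA parameter.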
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::Attributes::Builder FuncAttrs;
  llvm::Attributes::Builder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);

    // 'const' and 'pure' attribute functions are also nounwind.
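    // Illustrative example (an assumption, not from the original source):
    //   int f(int) __attribute__((const));
    // is lowered with `readnone nounwind`, so LLVM may CSE and freely
    // reorder calls to it.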
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attributes::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::Attributes::Builder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attributes::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attributes::InReg);
    PAL.push_back(llvm::
                  AttributeWithIndex::get(Index,
                                     llvm::Attributes::get(getLLVMContext(),
                                                           SRETAttrs)));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
      .removeAttribute(llvm::Attributes::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(0,
                                     llvm::Attributes::get(getLLVMContext(),
                                                           RetAttrs)));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::Attributes::Builder Attrs;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      // FIXME: handle sseregparm someday...

      // Increment Index if there is padding.
      Index += (AI.getPaddingType() != 0);

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
                                     llvm::Attributes::get(getLLVMContext(),
                                                           Attrs)));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attributes::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
        .removeAttribute(llvm::Attributes::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                     llvm::Attributes::get(getLLVMContext(),
                                                           Attrs)));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeWithIndex::get(~0,
                                     llvm::Attributes::get(getLLVMContext(),
                                                           FuncAttrs)));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    llvm::Attributes::Builder B;
    B.addAttribute(llvm::Attributes::NoAlias);
    AI->addAttr(llvm::Attributes::get(getLLVMContext(), B));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Skip the dummy padding argument.
      if (ArgI.getPaddingType())
        ++AI;

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified()) {
          llvm::Attributes::Builder B;
          B.addAttribute(llvm::Attributes::NoAlias);
          AI->addAttr(llvm::Attributes::get(getLLVMContext(), B));
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the larger of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }


      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.
  // Note that we no longer care about precise ordering as long as
  // there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay! Burn it all down. This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain. This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
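
// Illustrative sketch (editorial assumption): for IR shaped like
//   entry:
//     store i32 %x, i32* %retval
//     br label %return
//   return:
//     ; insertion point
// the walk above climbs from 'return' through its unique predecessor to
// 'entry', proving the store dominates the return without running a full
// dominance analysis.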

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}
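
// Editorial note: both predicates are deliberately shallow, purely syntactic
// checks.  An address that is neither a ConstantPointerNull nor an alloca is
// simply "unknown", and the writeback machinery below falls back to emitting
// a run-time null check for it.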

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                     "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}
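
// Hedged example (editorial assumption): the canonical source pattern for a
// writeback argument is an out-parameter such as
//   NSError *error;
//   [obj doThingAndReturnError:&error];  // parameter: NSError *__autoreleasing *
// Sema wraps the argument in an ObjCIndirectCopyRestoreExpr; emitWritebackArg
// below passes the address of a fresh temporary instead of &error, and
// emitWriteback copies the temporary's final value back into 'error' after
// the call.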

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary in place of the given
/// l-value address; after the call, the temporary's final value is
/// copied back out to that address.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
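
// Illustrative IR (editorial assumption about the exact rendering): the
// annotation above shows up as instruction metadata, e.g.
//   invoke void @foo() to label %invoke.cont unwind label %lpad,
//       !clang.arc.no_objc_arc_exceptions !N
// which licenses the ARC optimizer to reason across the unwind edge.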

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
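
// Hedged illustration (editorial, types assumed): given
//   struct S { int a; float b; };
// an argument classified ABIArgInfo::Expand reaches ExpandTypeToArgs and is
// pushed as two scalars (i32, float) rather than one aggregate; for a union,
// only the largest member is passed, since the others would merely alias it.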

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;
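
    // Hedged example (editorial assumption): on x86-64, a small aggregate
    // such as  struct P { int x, y; };  is typically classified Direct with
    // a coerce-to type of i64, so the case below passes its bytes as a
    // single scalar; larger aggregates tend to be Indirect and take the
    // byval path handled above.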

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
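      // Illustrative sketch (editorial assumption): with a coerce-to type of
      //   { i64, i64 }
      // the loop below emits two element loads and pushes two i64 arguments
      // instead of a single first-class-aggregate argument.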
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }
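
  // Hedged illustration (editorial assumption): for an unprototyped callee,
  //   void foo();   // K&R-style declaration, no prototype
  //   foo(1, 2);
  // the callee expression is a bitcast of @foo to a varargs function type;
  // when the parameter types line up as checked above, the cast is stripped
  // so the call targets @foo directly.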

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring the result of a call that had one, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the value doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
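
// Hedged usage sketch (editorial assumption): a source-level
//   int x = va_arg(ap, int);
// reaches EmitVAArg with VAListAddr pointing at 'ap' and Ty == int; the
// target's ABIInfo decides how the slot is addressed and how the list is
// advanced, which is why this is a one-line delegation.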