CGCall.cpp revision cd8ab51a44e80625d84126780b0d85a7732e25af
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for CC_X86Pascal to LLVM.
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                             FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of the implicit parameters already stored
/// in the prefix.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
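  // The implicit 'this' becomes the first IR parameter; e.g. 'int A::f()'
  // is lowered roughly as 'i32 @f(%struct.A*)' (illustrative IR only; the
  // actual mangled name and any ABI coercions vary by target).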
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "variadic dtor?");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
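  // E.g. the K&R declaration 'int f();' is lowered as 'i32 @f()' rather
  // than 'i32 @f(...)'; calls through an unprototyped type are handled
  // separately, in arrangeFreeFunctionLikeCall below.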
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
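  // E.g. a block call to 'void (^)(int, ...)' arrives here with one extra
  // required prefix argument (the block literal) plus the one prototyped
  // parameter, so required becomes 2 (illustrative example).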
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGT.CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                                    SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
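    // Only DstSize bytes are copied out of the temporary, so any extra
    // source bits beyond DstSize are dropped, matching the contract in the
    // function comment above.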
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }

    if (TargetDecl->hasAttr<NoReturnAttr>() ||
        TargetDecl->hasAttr<CXX11NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::
                  AttributeWithIndex::get(Index,
                                          llvm::Attribute::get(getLLVMContext(),
                                                               SRETAttrs)));

    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
             .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttributeSet::ReturnIndex,
                                          llvm::Attribute::get(getLLVMContext(),
                                                               RetAttrs)));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg()) {
        llvm::AttrBuilder PadAttrs;
        PadAttrs.addAttribute(llvm::Attribute::InReg);

        llvm::Attribute A = llvm::Attribute::get(getLLVMContext(), PadAttrs);
        PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
      }
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements() - 1; // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
                                        llvm::Attribute::get(getLLVMContext(),
                                                             Attrs)));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
               .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient.  Do we ever actually need to do
      // anything here?  The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                        llvm::Attribute::get(getLLVMContext(),
                                                             Attrs)));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttributeSet::FunctionIndex,
                                          llvm::Attribute::get(getLLVMContext(),
                                                               FuncAttrs)));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::get(getLLVMContext(),
                                     llvm::Attribute::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::get(getLLVMContext(),
                                           llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type.  Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                         llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately followed by the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
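  // For illustration, the control flow emitted in that case; the block and
  // value names match those created below, while the pointer types are
  // illustrative and intervening bitcasts are omitted:
  //   %icr.isnull = icmp eq i8** %srcAddr, null
  //   br i1 %icr.isnull, label %icr.done, label %icr.writeback
  // icr.writeback:
  //   %v = load i8** %icr.temp
  //   store i8* %v, i8** %srcAddr
  //   br label %icr.done
  // icr.done: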
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary copy: the temporary may be
/// copy-initialized from the original l-value, and its value is written
/// back to that l-value after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the
  // cleanups system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
                        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
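    // For illustration, what this path emits; the names match those created
    // in this function and the pointer types are illustrative:
    //   %icr.isnull = icmp eq i8** %srcAddr, null
    //   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
    //   br i1 %icr.isnull, label %icr.cont, label %icr.copy  ; if copying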
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    CGF.EmitBlock(contBB);
    condEval.end(CGF);
  }

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

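// For illustration, the two shapes EmitCallOrInvoke produces; "invoke.cont"
// matches the block created above, while the callee and the landing-pad label
// are illustrative.  With no active EH cleanups or handlers:
//   %r = call i8* @foo(i8* %x)
// otherwise:
//   %r = invoke i8* @foo(i8* %x)
//           to label %invoke.cont unwind label %lpad
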
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

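// For illustration, a sketch of what ABIArgInfo::Expand does with a struct
// argument; the C type and callee here are hypothetical:
//   struct S { int a; float b; };
//   void f(struct S s);
// is lowered, roughly, to two scalar IR arguments:
//   call void @f(i32 %s.a, float %s.b)
// with each field loaded separately from the aggregate's memory, and a union
// contributing only its largest field.
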
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();

    // Insert a padding argument to ensure proper alignment.
    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
      Args.push_back(llvm::UndefValue::get(PaddingType));
      ++IRArgNo;
    }

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

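    // For illustration, an indirect argument is passed as the address of a
    // temporary (or of the original aggregate when no copy is needed); the
    // struct type and alignment here are illustrative:
    //   %tmp = alloca %struct.S, align 8
    //   ...initialize %tmp...
    //   call void @f(%struct.S* %tmp)
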
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first-class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

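    // For illustration, coercing an aggregate through a first-class aggregate
    // type such as { i64, i64 } flattens into element loads; the names and
    // struct type here are illustrative:
    //   %c = bitcast %struct.S* %src to { i64, i64 }*
    //   %e0p = getelementptr { i64, i64 }* %c, i32 0, i32 0
    //   %e0 = load i64* %e0p, align 1
    // and likewise for element 1, with both values then passed directly.
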
    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer-to-function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

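  // For illustration, a noreturn call site ends up terminated like this
  // (the callee is illustrative):
  //   call void @abort()
  //   unreachable
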
  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                       llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}