//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}
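
// Illustrative example (not part of the original source): a declaration such
// as
//   void f(int) __attribute__((fastcall));
// carries CC_X86FastCall in the AST, and the helper above maps it to
// llvm::CallingConv::X86_FastCall on the emitted call or function.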

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
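
// Illustrative example (not part of the original source): for
//   struct S { void m() const; };
// the 'this' type computed here is 'S *', not 'const S *'; per the comment
// above, the method qualifier is deliberately ignored for codegen purposes.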

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the
/// given unprototyped function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
                             ArrayRef<CanQualType>(),
                             FTNP->getExtInfo(),
                             RequiredArgs(0));
}

/// Arrange the argument and result information for a value of the
/// given function type, on top of any implicit parameters already
/// stored.
static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
                                  SmallVectorImpl<CanQualType> &argTypes,
                                             CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeFunctionType(resultType, argTypes,
                                 FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}
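
// Illustrative example (not part of the original source): on ARM, a
// declaration such as
//   void g(void) __attribute__((pcs("aapcs-vfp")));
// produces a PcsAttr whose getPCS() is not PcsAttr::AAPCS, so the helper
// above returns CC_AAPCS_VFP; with no attribute at all it falls back to CC_C.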

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeFunctionType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}
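
// Illustrative note (not part of the original source):
// BuildConstructorSignature lets the C++ ABI append implicit parameters after
// 'this'; for example, under the Itanium ABI a base-object constructor of a
// class with virtual bases takes an implicit VTT pointer, so argTypes can
// grow beyond 'this' before the formal parameters are appended above.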

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
                             RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeFunctionType(noProto->getResultType(),
                               ArrayRef<CanQualType>(),
                               noProto->getExtInfo(),
                               RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
                             einfo, required);
}
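
// Illustrative example (not part of the original source): a method declared as
//   - (void)setX:(int)x;
// is arranged as if it were the C function
//   void setX(id self, SEL _cmd, int x);
// i.e. the receiver and selector become the first two parameters.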

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
                                  const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFunctionCall(fnType->getResultType(), args,
                             fnType->getExtInfo(), required);
}
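
// Illustrative example (not part of the original source): for a call to
//   int printf(const char *, ...);
// with three actual arguments, 'required' becomes RequiredArgs(1): only the
// first argument is covered by the prototype, and the rest are passed under
// the target's variadic conventions.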

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionCall(QualType resultType,
                                  const CallArgList &args,
                                  const FunctionType::ExtInfo &info,
                                  RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
                             required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
                             required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
                             FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQualType resultType,
                                  ArrayRef<CanQualType> argTypes,
                                  const FunctionType::ExtInfo &info,
                                  RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
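
// Illustrative note (not part of the original source): because CGFunctionInfo
// objects are memoized in the FoldingSet keyed on (ExtInfo, RequiredArgs,
// result type, argument types), two functions with identical signatures share
// a single CGFunctionInfo and the ABI computation above runs once per
// distinct signature.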

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = &*i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl &FD = *i;
        assert(!FD.isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(FD.getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
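
// Illustrative example (not part of the original source): expanding
//   struct P { int x[2]; _Complex float c; };
// recurses through the array and complex members and yields the flat list
// { i32, i32, float, float }, one scalar per leaf field.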

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = &*i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = &*i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
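
// Illustrative example (not part of the original source): given a pointer to
//   %outer = type { %inner }   where   %inner = type { i32 }
// and DstSize == 4, the helper above emits two "coerce.dive" GEPs and returns
// an i32*; it stops diving as soon as the first element is too small to cover
// the DstSize bytes being accessed.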

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
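
// Illustrative example (not part of the original source): on a target where
// IntPtrTy is i64, coercing an i8* value to i32 produces
//   %coerce.val.pi = ptrtoint i8* %val to i64
//   %coerce.val.ii = trunc i64 %coerce.val.pi to i32
// while a pointer-to-pointer coercion is a single bitcast.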

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}
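
// Illustrative example (not part of the original source): loading a
//   %struct.pair = type { i32, i32 }
// through an i64 coerce-to type bitcasts the pointer and emits a single
//   load i64* %casted, align 1
// so both fields travel together in one scalar value.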

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
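
// Illustrative note (not part of the original source): CreateCoercedStore is
// the mirror image of CreateCoercedLoad; together they let a value travel in
// its ABI coerce-to type (say, i64) while still being read from and written
// to memory of the original aggregate type.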

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding type to ensure proper alignment.
      if (llvm::Type *PaddingType = argAI.getPaddingType())
        argTypes.push_back(PaddingType);
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
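
// Illustrative example (not part of the original source): on x86-64,
//   struct Big { long a, b, c; } make(void);
// returns indirectly, so the Indirect case above yields roughly
//   define void @make(%struct.Big* %agg.result)
// with a void result type and the hidden result pointer as the first argument.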

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::Attributes FuncAttrs;
  llvm::Attributes RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadNone;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadOnly;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::Attributes Attrs;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType() ||
           ParamType->isReferenceType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attrs |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      // Increment Index if there is padding.
      Index += (AI.getPaddingType() != 0);

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attrs |= llvm::Attribute::ByVal;

      Attrs |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
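
// Illustrative example (not part of the original source): for
//   short f(short) __attribute__((const));
// the list built above carries 'sext' on both the parameter and the return
// value (ABIArgInfo::Extend on a signed 16-bit type), plus readnone and
// nounwind on the function itself.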

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
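
// An illustrative sketch (hypothetical K&R-style code, not from this file):
//   void f(c) char c; { ... }
// The caller promotes 'c' to int, so the incoming IR argument is i32;
// the demotion above truncates it back to the declared i8 before the
// parameter variable is bound.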

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Skip the dummy padding argument.
      if (ArgI.getPaddingType())
        ++AI;

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
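
// An illustrative sketch (assumed ABI coercion, not from this file): if a
// parameter 'x' of struct type is coerced to { i64, i64 }, it arrives as
// two scalar IR arguments, which the prolog above names "x.coerce0" and
// "x.coerce1" and stores into the parameter's alloca.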

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in the current insertion block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}
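
// An illustrative sketch of the rewrite above (IR names invented): a tail
// sequence like
//   %retained = call i8* @objc_retain(i8* %val)
//   %result = bitcast i8* %retained to %SomeClass*
// is deleted and replaced by a single fused
//   call i8* @objc_retainAutoreleaseReturnValue(i8* %val)
// cast back to the original result type.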

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}
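
// An illustrative sketch (hypothetical method, not from this file): in
//   - (id)foo { return self; }
// with an immutable 'self', the +1 retain of the loaded 'self' is erased
// and the plain load is returned, so no retain/autorelease pair is needed.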

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
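
// An illustrative sketch of the elision this enables (IR names invented):
//   store i32 %x, i32* %retval
//   %tmp = load i32* %retval
//   ret i32 %tmp
// collapses to 'ret i32 %x', and if the store was the only use, the
// %retval alloca is deleted as well.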

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                            "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}
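
// An illustrative sketch of the guarded store emitted above (types invented;
// the block names come from the code):
//   %isnull = icmp eq i8** %src, null
//   br i1 %isnull, label %icr.done, label %icr.writeback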

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary that is copied back to the
/// original l-value after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}
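
// An illustrative sketch (hypothetical ObjC code, not from this file): for
//   NSError *err;
//   [obj doSomething:&err];
// against a parameter of type NSError *__autoreleasing *, the call receives
// the address of 'icr.temp' instead, and the temporary's value is written
// back to 'err' after the call returns.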

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = &*i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = &*i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
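
// An illustrative sketch (hypothetical type, not from this file): expanding
//   struct S { int a; float b; };
// pushes two scalar arguments, one per field; for a union, only the largest
// field is expanded, as in the logic above.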

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }
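
  // An illustrative sketch (hypothetical C code, not from this file): given
  //   void f();          // unprototyped declaration
  //   void f(int x) { }  // matching definition elsewhere
  // a call 'f(1)' is emitted through a bitcast of @f; since the parameter
  // types line up, the cast is stripped and @f is called directly.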

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Add metadata for __attribute__((alloc_size(foo))).
  if (TargetDecl) {
    if (const AllocSizeAttr* Attr = TargetDecl->getAttr<AllocSizeAttr>()) {
      SmallVector<llvm::Value*, 4> Args;
      llvm::IntegerType *Ty = llvm::IntegerType::getInt32Ty(getLLVMContext());
      bool isMethod = false;
      if (const CXXMethodDecl *MDecl = dyn_cast<CXXMethodDecl>(TargetDecl))
        isMethod = MDecl->isInstance();

      for (AllocSizeAttr::args_iterator I = Attr->args_begin(),
           E = Attr->args_end(); I != E; ++I) {
        Args.push_back(llvm::ConstantInt::get(Ty, *I + isMethod));
      }

      llvm::MDNode *MD = llvm::MDNode::get(getLLVMContext(), Args);
      CS.getInstruction()->setMetadata("alloc_size", MD);
    }
  }
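
  // An illustrative sketch (hypothetical declaration, not from this file):
  //   void *my_alloc(size_t n) __attribute__((alloc_size(1)));
  // Calls to my_alloc get "alloc_size" metadata recording which argument
  // carries the allocation size (shifted by one for C++ instance methods
  // to account for 'this').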

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}