CGCall.cpp revision 575a1c9dc8dc5b4977194993e289f9eda7295c39
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

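/// Map a Clang calling convention onto the corresponding LLVM calling
/// convention. Conventions with no LLVM equivalent (currently only
/// CC_X86Pascal) fall back to the default C convention.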
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

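/// Derive the calling convention to use for a declaration from its CC
/// attributes, defaulting to the C convention when none are present.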
static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*HasRegParm*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  llvm::SmallVector<CanQualType, 1> args;
  return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(),
                          Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertTypeRecursive(I->type));

  // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
  // types, resolve them now.  These pointers may point to this function, which
  // we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

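/// Construct a CGFunctionInfo. The return type is stored as Args[0] and the
/// argument types follow it; the per-argument ABI information is filled in
/// afterwards by the ABIInfo's computeInfo.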
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, bool _HasRegParm,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

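/// Flatten a structure type into the LLVM types of its scalar fields,
/// recursing into nested records. Used to build the IR parameter list for
/// arguments lowered with ABIArgInfo::Expand.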
void CodeGenTypes::GetExpandedTypes(QualType type,
                     llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes,
                                    bool isRecursive) {
  const RecordType *RT = type->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType fieldType = FD->getType();
    if (fieldType->isRecordType())
      GetExpandedTypes(fieldType, expandedTypes, isRecursive);
    else
      expandedTypes.push_back(ConvertType(fieldType, isRecursive));
  }
}

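/// Reassemble an expanded structure from consecutive IR arguments, storing
/// each scalar argument into the corresponding field of the aggregate lvalue
/// LV. Returns an iterator one past the last argument consumed.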
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

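/// The inverse of ExpandTypeFromArgs: flatten the fields of an aggregate
/// rvalue into individual scalar values on the outgoing argument list.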
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (const llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

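/// Whether the function's return value is passed indirectly through a hidden
/// sret pointer argument.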
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

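/// Whether a message returning the given type needs the _fpret variant of the
/// Objective-C message-send entry point, i.e. whether the target returns the
/// corresponding real type in a floating-point register.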
bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

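/// Compute the LLVM function type for the given global declaration. Note
/// that unprototyped (K&R) definitions are treated as non-variadic.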
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

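/// Lower an ABI-computed signature to the corresponding LLVM function type,
/// mapping each ABIArgInfo kind onto zero or more IR parameters (e.g. an
/// Indirect return becomes a leading pointer parameter with a void result).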
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
                              bool isRecursive) {
  llvm::SmallVector<const llvm::Type*, 8> argTypes;
  const llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    const llvm::Type *ty = ConvertType(ret, isRecursive);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *argType = argAI.getCoerceToType();
      if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes, isRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}

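/// Compute the LLVM function type to use for a vtable slot. If the method's
/// signature cannot be fully converted yet, return an opaque type as a
/// placeholder instead.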
const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT)) {
    const CGFunctionInfo *Info;
    if (isa<CXXDestructorDecl>(MD))
      Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
    else
      Info = &getFunctionInfo(MD);
    return GetFunctionType(*Info, FPT->isVariadic(), false);
  }

  return llvm::OpaqueType::get(getLLVMContext());
}

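/// Build the LLVM attribute list for a call or function definition with the
/// given ABI signature: return/parameter sign- and zero-extension, sret and
/// byval markers, noreturn/nounwind/readnone deductions from the decl, and
/// module-level options such as OptimizeForSize.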
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

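  // Walk the arguments, spending the regparm budget: an integer or pointer
  // argument consumes one register per pointer-width chunk of its size, and
  // arguments that still fit within the budget are marked 'inreg'.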
  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      llvm::SmallVector<const llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types, false);
      Index += types.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  const llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

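/// Emit the prologue work for the ABI-lowered arguments: name the sret
/// argument if present, then, for each formal parameter, reconstruct a value
/// of the declared type from the IR arguments (loading indirect arguments,
/// storing coerced or expanded pieces into a temporary) and emit the
/// parameter declaration.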
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

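/// Emit the function epilogue: reconstruct the return value from ReturnValue
/// according to the return ABI, then emit the ret instruction, reusing the
/// debug location of an elided return-value store when possible.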
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap the
      // alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (type->isReferenceType())
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);

  args.add(EmitAnyExprToTemp(E), type);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::Value * const *ArgBegin,
                                  llvm::Value * const *ArgEnd,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  ArgBegin, ArgEnd, Name);
  EmitBlock(ContBB);
  return Invoke;
}

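/// Generate a call to the given callee with the given ABI signature: lower
/// each argument per its ABIArgInfo, build the attribute list, emit a call
/// or invoke depending on the EH state, and rebuild an RValue of the declared
/// return type from the result.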
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned Alignment =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->Ty));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            Alignment, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        if (RV.isScalar())
          Args.push_back(RV.getScalarVal());
        else
          Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

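/// Emit a va_arg access by delegating to the target's ABIInfo implementation,
/// which knows the target va_list layout.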
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
1480