CGCall.cpp revision 121b3facb4e0585d23766f9c1e4fdf9018a4b217
1//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliance.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "ABIInfo.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "clang/Basic/TargetInfo.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/Frontend/CodeGenOptions.h"
24#include "llvm/Attributes.h"
25#include "llvm/Support/CallSite.h"
26#include "llvm/Target/TargetData.h"
27using namespace clang;
28using namespace CodeGen;
29
30/***/
31
32static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
33  switch (CC) {
34  default: return llvm::CallingConv::C;
35  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
36  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
37  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
38  }
39}
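// For example, a declaration such as
//   void __attribute__((fastcall)) f(int);
// carries CC_X86FastCall in Clang and is emitted with
// llvm::CallingConv::X86_FastCall; any convention not listed above (e.g.
// plain CC_C) falls back to llvm::CallingConv::C.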
40
41/// Derives the 'this' type for codegen purposes, i.e. ignoring method
42/// qualification.
43/// FIXME: address space qualification?
44static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
45  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
46  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
47}
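// For a method of 'struct S', the 'this' type produced here is simply 'S *';
// method qualifiers (e.g. a const member function) do not change it, which is
// why the FIXME above asks about address-space qualification.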
48
49/// Returns the canonical formal type of the given C++ method.
50static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
51  return MD->getType()->getCanonicalTypeUnqualified()
52           .getAs<FunctionProtoType>();
53}
54
55/// Returns the "extra-canonicalized" return type, which discards
56/// qualifiers on the return type.  Codegen doesn't care about them,
57/// and it makes ABI code a little easier to be able to assume that
58/// all parameter and return types are top-level unqualified.
59static CanQualType GetReturnType(QualType RetTy) {
60  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
61}
62
63const CGFunctionInfo &
64CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
65                              bool IsRecursive) {
66  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
67                         llvm::SmallVector<CanQualType, 16>(),
68                         FTNP->getExtInfo(), IsRecursive);
69}
70
71/// \param ArgTys - contains any initial argument types besides those
72///   in the formal type
73static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
74                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
75                                             CanQual<FunctionProtoType> FTP,
76                                             bool IsRecursive = false) {
77  // FIXME: Kill copy.
78  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
79    ArgTys.push_back(FTP->getArgType(i));
80  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
81  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
82}
83
84const CGFunctionInfo &
85CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
86                              bool IsRecursive) {
87  llvm::SmallVector<CanQualType, 16> ArgTys;
88  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
89}
90
91static CallingConv getCallingConventionForDecl(const Decl *D) {
92  // Set the appropriate calling convention for the Function.
93  if (D->hasAttr<StdCallAttr>())
94    return CC_X86StdCall;
95
96  if (D->hasAttr<FastCallAttr>())
97    return CC_X86FastCall;
98
99  if (D->hasAttr<ThisCallAttr>())
100    return CC_X86ThisCall;
101
102  return CC_C;
103}
104
105const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
106                                                 const FunctionProtoType *FTP) {
107  llvm::SmallVector<CanQualType, 16> ArgTys;
108
109  // Add the 'this' pointer.
110  ArgTys.push_back(GetThisType(Context, RD));
111
112  return ::getFunctionInfo(*this, ArgTys,
113              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
114}
115
116const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
117  llvm::SmallVector<CanQualType, 16> ArgTys;
118
119  // Add the 'this' pointer unless this is a static method.
120  if (MD->isInstance())
121    ArgTys.push_back(GetThisType(Context, MD->getParent()));
122
123  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
124}
125
126const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
127                                                    CXXCtorType Type) {
128  llvm::SmallVector<CanQualType, 16> ArgTys;
129
130  // Add the 'this' pointer.
131  ArgTys.push_back(GetThisType(Context, D->getParent()));
132
133  // Check if we need to add a VTT parameter (which has type void **).
134  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
135    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
136
137  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
138}
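// Illustration: for a base-object constructor of a class with virtual bases,
//   struct A : virtual B { A(int); };
// the Ctor_Base variant is lowered as if it took (A *this, void **vtt, int),
// matching the VTT parameter appended above.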
139
140const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
141                                                    CXXDtorType Type) {
142  llvm::SmallVector<CanQualType, 16> ArgTys;
143
144  // Add the 'this' pointer.
145  ArgTys.push_back(GetThisType(Context, D->getParent()));
146
147  // Check if we need to add a VTT parameter (which has type void **).
148  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
149    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
150
151  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
152}
153
154const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
155  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
156    if (MD->isInstance())
157      return getFunctionInfo(MD);
158
159  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
160  assert(isa<FunctionType>(FTy));
161  if (isa<FunctionNoProtoType>(FTy))
162    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
163  assert(isa<FunctionProtoType>(FTy));
164  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
165}
166
167const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
168  llvm::SmallVector<CanQualType, 16> ArgTys;
169  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
170  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
171  // FIXME: Kill copy?
172  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
173         e = MD->param_end(); i != e; ++i) {
174    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
175  }
176  return getFunctionInfo(GetReturnType(MD->getResultType()),
177                         ArgTys,
178                         FunctionType::ExtInfo(
179                             /*NoReturn*/ false,
180                             /*RegParm*/ 0,
181                             getCallingConventionForDecl(MD)));
182}
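// Illustration: an Objective-C method
//   - (int)addX:(int)x toY:(int)y;
// is lowered with the argument list (self, _cmd, x, y), i.e. the implicit
// receiver and selector parameters added above come first.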
183
184const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
185  // FIXME: Do we need to handle ObjCMethodDecl?
186  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
187
188  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
189    return getFunctionInfo(CD, GD.getCtorType());
190
191  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
192    return getFunctionInfo(DD, GD.getDtorType());
193
194  return getFunctionInfo(FD);
195}
196
197const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
198                                                    const CallArgList &Args,
199                                            const FunctionType::ExtInfo &Info) {
200  // FIXME: Kill copy.
201  llvm::SmallVector<CanQualType, 16> ArgTys;
202  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
203       i != e; ++i)
204    ArgTys.push_back(Context.getCanonicalParamType(i->second));
205  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
206}
207
208const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
209                                                    const FunctionArgList &Args,
210                                            const FunctionType::ExtInfo &Info) {
211  // FIXME: Kill copy.
212  llvm::SmallVector<CanQualType, 16> ArgTys;
213  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
214       i != e; ++i)
215    ArgTys.push_back(Context.getCanonicalParamType(i->second));
216  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
217}
218
219const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
220                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
221                                            const FunctionType::ExtInfo &Info,
222                                                    bool IsRecursive) {
223#ifndef NDEBUG
224  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
225         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
226    assert(I->isCanonicalAsParam());
227#endif
228
229  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
230
231  // Lookup or create unique function info.
232  llvm::FoldingSetNodeID ID;
233  CGFunctionInfo::Profile(ID, Info, ResTy,
234                          ArgTys.begin(), ArgTys.end());
235
236  void *InsertPos = 0;
237  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
238  if (FI)
239    return *FI;
240
241  // Construct the function info.
242  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
243                          ArgTys.data(), ArgTys.size());
244  FunctionInfos.InsertNode(FI, InsertPos);
245
246  // ABI lowering wants to know what our preferred type for the argument is in
247  // various situations, so pass it in.
248  llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
249  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
250       I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
251    // If this is being called from the guts of the ConvertType loop, make sure
252    // to call ConvertTypeRecursive so we don't get into issues with cyclic
253    // pointer type structures.
254    PreferredArgTypes.push_back(ConvertTypeRecursive(*I));
255  }
256
257  // Compute ABI information.
258  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
259                           PreferredArgTypes.data(), PreferredArgTypes.size());
260
261  // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
262  // types, resolve them now.  These pointers may point to this function, which
263  // we *just* filled in the FunctionInfo for.
264  if (!IsRecursive && !PointersToResolve.empty()) {
265    // Use PATypeHolders so that our preferred types don't dangle under
266    // refinement.
267    llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(),
268                                                     PreferredArgTypes.end());
269    HandleLateResolvedPointers();
270    PreferredArgTypes.clear();
271    PreferredArgTypes.append(Handles.begin(), Handles.end());
272  }
273
274
275  return *FI;
276}
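// Note that identical signatures share one CGFunctionInfo: a second query
// that profiles to the same (convention, return type, argument types) tuple
// returns the FoldingSet node created above instead of recomputing the ABI
// lowering.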
277
278CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
279                               bool _NoReturn, unsigned _RegParm,
280                               CanQualType ResTy,
281                               const CanQualType *ArgTys,
282                               unsigned NumArgTys)
283  : CallingConvention(_CallingConvention),
284    EffectiveCallingConvention(_CallingConvention),
285    NoReturn(_NoReturn), RegParm(_RegParm)
286{
287  NumArgs = NumArgTys;
288
289  // FIXME: Coallocate with the CGFunctionInfo object.
290  Args = new ArgInfo[1 + NumArgTys];
291  Args[0].type = ResTy;
292  for (unsigned i = 0; i != NumArgTys; ++i)
293    Args[1 + i].type = ArgTys[i];
294}
295
296/***/
297
298void CodeGenTypes::GetExpandedTypes(QualType Ty,
299                                    std::vector<const llvm::Type*> &ArgTys,
300                                    bool IsRecursive) {
301  const RecordType *RT = Ty->getAsStructureType();
302  assert(RT && "Can only expand structure types.");
303  const RecordDecl *RD = RT->getDecl();
304  assert(!RD->hasFlexibleArrayMember() &&
305         "Cannot expand structure with flexible array.");
306
307  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
308         i != e; ++i) {
309    const FieldDecl *FD = *i;
310    assert(!FD->isBitField() &&
311           "Cannot expand structure with bit-field members.");
312
313    QualType FT = FD->getType();
314    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
315      GetExpandedTypes(FT, ArgTys, IsRecursive);
316    } else {
317      ArgTys.push_back(ConvertType(FT, IsRecursive));
318    }
319  }
320}
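// Illustration: expanding
//   struct P { int x; struct { float a, b; } inner; };
// appends i32, float, float to ArgTys; nested aggregates are flattened
// recursively, while bit-fields and flexible array members are rejected by
// the asserts above.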
321
322llvm::Function::arg_iterator
323CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
324                                    llvm::Function::arg_iterator AI) {
325  const RecordType *RT = Ty->getAsStructureType();
326  assert(RT && "Can only expand structure types.");
327
328  RecordDecl *RD = RT->getDecl();
329  assert(LV.isSimple() &&
330         "Unexpected non-simple lvalue during struct expansion.");
331  llvm::Value *Addr = LV.getAddress();
332  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
333         i != e; ++i) {
334    FieldDecl *FD = *i;
335    QualType FT = FD->getType();
336
337    // FIXME: What are the right qualifiers here?
338    LValue LV = EmitLValueForField(Addr, FD, 0);
339    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
340      AI = ExpandTypeFromArgs(FT, LV, AI);
341    } else {
342      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
343      ++AI;
344    }
345  }
346
347  return AI;
348}
349
350void
351CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
352                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
353  const RecordType *RT = Ty->getAsStructureType();
354  assert(RT && "Can only expand structure types.");
355
356  RecordDecl *RD = RT->getDecl();
357  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
358  llvm::Value *Addr = RV.getAggregateAddr();
359  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
360         i != e; ++i) {
361    FieldDecl *FD = *i;
362    QualType FT = FD->getType();
363
364    // FIXME: What are the right qualifiers here?
365    LValue LV = EmitLValueForField(Addr, FD, 0);
366    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
367      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
368    } else {
369      RValue RV = EmitLoadOfLValue(LV, FT);
370      assert(RV.isScalar() &&
371             "Unexpected non-scalar rvalue during struct expansion.");
372      Args.push_back(RV.getScalarVal());
373    }
374  }
375}
376
377/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
378/// accessing some number of bytes out of it, try to gep into the struct to get
379/// at its inner goodness.  Dive as deep as possible without entering an element
380/// with an in-memory size smaller than DstSize.
381static llvm::Value *
382EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
383                                   const llvm::StructType *SrcSTy,
384                                   uint64_t DstSize, CodeGenFunction &CGF) {
385  // We can't dive into a zero-element struct.
386  if (SrcSTy->getNumElements() == 0) return SrcPtr;
387
388  const llvm::Type *FirstElt = SrcSTy->getElementType(0);
389
390  // If the first elt is at least as large as what we're looking for, or if the
391  // first element is the same size as the whole struct, we can enter it.
392  uint64_t FirstEltSize =
393    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
394  if (FirstEltSize < DstSize &&
395      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
396    return SrcPtr;
397
398  // GEP into the first element.
399  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
400
401  // If the first element is a struct, recurse.
402  const llvm::Type *SrcTy =
403    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
404  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
405    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
406
407  return SrcPtr;
408}
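// Example: with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8, the first GEP
// dives into the inner { i32, i32 } (its 8 bytes cover DstSize), and the
// recursion then stops because that struct's first element, i32, is smaller
// than both DstSize and the struct itself.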
409
410/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the type Ty, where both
411/// are either integers or pointers.  This does a truncation of the value if it
412/// is too large or a zero extension if it is too small.
413static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
414                                             const llvm::Type *Ty,
415                                             CodeGenFunction &CGF) {
416  if (Val->getType() == Ty)
417    return Val;
418
419  if (isa<llvm::PointerType>(Val->getType())) {
420    // If this is Pointer->Pointer avoid conversion to and from int.
421    if (isa<llvm::PointerType>(Ty))
422      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
423
424    // Convert the pointer to an integer so we can play with its width.
425    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
426  }
427
428  const llvm::Type *DestIntTy = Ty;
429  if (isa<llvm::PointerType>(DestIntTy))
430    DestIntTy = CGF.IntPtrTy;
431
432  if (Val->getType() != DestIntTy)
433    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
434
435  if (isa<llvm::PointerType>(Ty))
436    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
437  return Val;
438}
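// Example: coercing an i8* value to i32 on a target with 64-bit pointers
// emits a ptrtoint to i64 followed by a trunc to i32; the reverse direction
// zero-extends i32 to i64 before the inttoptr, and pointer-to-pointer
// coercions are just a bitcast.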
439
440
441
442/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
443/// a pointer to an object of type \arg Ty.
444///
445/// This safely handles the case when the src type is smaller than the
446/// destination type; in this situation the values of bits which are not
447/// present in the src are undefined.
448static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
449                                      const llvm::Type *Ty,
450                                      CodeGenFunction &CGF) {
451  const llvm::Type *SrcTy =
452    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
453
454  // If SrcTy and Ty are the same, just do a load.
455  if (SrcTy == Ty)
456    return CGF.Builder.CreateLoad(SrcPtr);
457
458  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
459
460  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
461    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
462    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
463  }
464
465  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
466
467  // If the source and destination are integer or pointer types, just do an
468  // extension or truncation to the desired type.
469  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
470      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
471    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
472    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
473  }
474
475  // If load is legal, just bitcast the src pointer.
476  if (SrcSize >= DstSize) {
477    // Generally SrcSize is never greater than DstSize, since this means we are
478    // losing bits. However, this can happen in cases where the structure has
479    // additional padding, for example due to a user specified alignment.
480    //
481    // FIXME: Assert that we aren't truncating non-padding bits when we have
482    // access to that information.
483    llvm::Value *Casted =
484      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
485    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
486    // FIXME: Use better alignment / avoid requiring aligned load.
487    Load->setAlignment(1);
488    return Load;
489  }
490
491  // Otherwise do coercion through memory. This is stupid, but
492  // simple.
493  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
494  llvm::Value *Casted =
495    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
496  llvm::StoreInst *Store =
497    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
498  // FIXME: Use better alignment / avoid requiring aligned store.
499  Store->setAlignment(1);
500  return CGF.Builder.CreateLoad(Tmp);
501}
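// Example: loading a 12-byte { i32, i32, i32 } object as the type { i64, i32 }
// (16 bytes with padding) takes the memory path above: the source value is
// stored into a temporary alloca of the destination type and reloaded, since
// a direct bitcast-and-load could read past the end of the source object.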
502
503/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
504/// where the source and destination may have different types.
505///
506/// This safely handles the case when the src type is larger than the
507/// destination type; the upper bits of the src will be lost.
508static void CreateCoercedStore(llvm::Value *Src,
509                               llvm::Value *DstPtr,
510                               bool DstIsVolatile,
511                               CodeGenFunction &CGF) {
512  const llvm::Type *SrcTy = Src->getType();
513  const llvm::Type *DstTy =
514    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
515  if (SrcTy == DstTy) {
516    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
517    return;
518  }
519
520  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
521
522  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
523    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
524    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
525  }
526
527  // If the source and destination are integer or pointer types, just do an
528  // extension or truncation to the desired type.
529  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
530      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
531    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
532    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
533    return;
534  }
535
536  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
537
538  // If store is legal, just bitcast the src pointer.
539  if (SrcSize <= DstSize) {
540    llvm::Value *Casted =
541      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
542    // FIXME: Use better alignment / avoid requiring aligned store.
543    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
544  } else {
545    // Otherwise do coercion through memory. This is stupid, but
546    // simple.
547
548    // Generally SrcSize is never greater than DstSize, since this means we are
549    // losing bits. However, this can happen in cases where the structure has
550    // additional padding, for example due to a user specified alignment.
551    //
552    // FIXME: Assert that we aren't truncating non-padding bits when we have
553    // access to that information.
554    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
555    CGF.Builder.CreateStore(Src, Tmp);
556    llvm::Value *Casted =
557      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
558    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
559    // FIXME: Use better alignment / avoid requiring aligned load.
560    Load->setAlignment(1);
561    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
562  }
563}
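// This mirrors CreateCoercedLoad above: integer/pointer pairs go through
// CoerceIntOrPtrToIntOrPtr, a destination at least as large as the source is
// stored through a bitcast of DstPtr, and anything else bounces through a
// temporary alloca of the source type.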
564
565/***/
566
567bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
568  return FI.getReturnInfo().isIndirect();
569}
570
571const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
572  const CGFunctionInfo &FI = getFunctionInfo(GD);
573
574  // For definition purposes, don't consider a K&R function variadic.
575  bool Variadic = false;
576  if (const FunctionProtoType *FPT =
577        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
578    Variadic = FPT->isVariadic();
579
580  return GetFunctionType(FI, Variadic, false);
581}
582
583const llvm::FunctionType *
584CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
585                              bool IsRecursive) {
586  std::vector<const llvm::Type*> ArgTys;
587
588  const llvm::Type *ResultType = 0;
589
590  QualType RetTy = FI.getReturnType();
591  const ABIArgInfo &RetAI = FI.getReturnInfo();
592  switch (RetAI.getKind()) {
593  case ABIArgInfo::Expand:
594    assert(0 && "Invalid ABI kind for return argument");
595
596  case ABIArgInfo::Extend:
597  case ABIArgInfo::Direct:
598    ResultType = ConvertType(RetTy, IsRecursive);
599    break;
600
601  case ABIArgInfo::Indirect: {
602    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
603    ResultType = llvm::Type::getVoidTy(getLLVMContext());
604    const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
605    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
606    break;
607  }
608
609  case ABIArgInfo::Ignore:
610    ResultType = llvm::Type::getVoidTy(getLLVMContext());
611    break;
612
613  case ABIArgInfo::Coerce:
614    ResultType = RetAI.getCoerceToType();
615    break;
616  }
617
618  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
619         ie = FI.arg_end(); it != ie; ++it) {
620    const ABIArgInfo &AI = it->info;
621
622    switch (AI.getKind()) {
623    case ABIArgInfo::Ignore:
624      break;
625
626    case ABIArgInfo::Coerce: {
627      // If the coerce-to type is a first class aggregate, flatten it.  Either
628      // way is semantically identical, but fast-isel and the optimizer
629      // generally like scalar values better than FCAs.
630      const llvm::Type *ArgTy = AI.getCoerceToType();
631      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
632        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
633          ArgTys.push_back(STy->getElementType(i));
634      } else {
635        ArgTys.push_back(ArgTy);
636      }
637      break;
638    }
639
640    case ABIArgInfo::Indirect: {
641      // indirect arguments are always on the stack, which is addr space #0.
642      const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
643      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
644      break;
645    }
646
647    case ABIArgInfo::Extend:
648    case ABIArgInfo::Direct:
649      ArgTys.push_back(ConvertType(it->type, IsRecursive));
650      break;
651
652    case ABIArgInfo::Expand:
653      GetExpandedTypes(it->type, ArgTys, IsRecursive);
654      break;
655    }
656  }
657
658  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
659}
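// Rough illustration (assuming an x86-64-style lowering where the struct is
// coerced to { i64, i64 }): a C declaration such as
//   struct S { long a, b; };  int f(struct S s, int *p);
// comes out of this routine as
//   i32 (i64, i64, i32*)
// because the Coerce case flattens first-class-aggregate coerce-to types.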
660
661const llvm::Type *
662CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
663  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
664
665  if (!VerifyFuncTypeComplete(FPT))
666    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);
667
668  return llvm::OpaqueType::get(getLLVMContext());
669}
670
671void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
672                                           const Decl *TargetDecl,
673                                           AttributeListType &PAL,
674                                           unsigned &CallingConv) {
675  unsigned FuncAttrs = 0;
676  unsigned RetAttrs = 0;
677
678  CallingConv = FI.getEffectiveCallingConvention();
679
680  if (FI.isNoReturn())
681    FuncAttrs |= llvm::Attribute::NoReturn;
682
683  // FIXME: handle sseregparm someday...
684  if (TargetDecl) {
685    if (TargetDecl->hasAttr<NoThrowAttr>())
686      FuncAttrs |= llvm::Attribute::NoUnwind;
687    if (TargetDecl->hasAttr<NoReturnAttr>())
688      FuncAttrs |= llvm::Attribute::NoReturn;
689    if (TargetDecl->hasAttr<ConstAttr>())
690      FuncAttrs |= llvm::Attribute::ReadNone;
691    else if (TargetDecl->hasAttr<PureAttr>())
692      FuncAttrs |= llvm::Attribute::ReadOnly;
693    if (TargetDecl->hasAttr<MallocAttr>())
694      RetAttrs |= llvm::Attribute::NoAlias;
695  }
696
697  if (CodeGenOpts.OptimizeSize)
698    FuncAttrs |= llvm::Attribute::OptimizeForSize;
699  if (CodeGenOpts.DisableRedZone)
700    FuncAttrs |= llvm::Attribute::NoRedZone;
701  if (CodeGenOpts.NoImplicitFloat)
702    FuncAttrs |= llvm::Attribute::NoImplicitFloat;
703
704  QualType RetTy = FI.getReturnType();
705  unsigned Index = 1;
706  const ABIArgInfo &RetAI = FI.getReturnInfo();
707  switch (RetAI.getKind()) {
708  case ABIArgInfo::Extend:
709   if (RetTy->isSignedIntegerType()) {
710     RetAttrs |= llvm::Attribute::SExt;
711   } else if (RetTy->isUnsignedIntegerType()) {
712     RetAttrs |= llvm::Attribute::ZExt;
713   }
714   // FALLTHROUGH
715  case ABIArgInfo::Direct:
716    break;
717
718  case ABIArgInfo::Indirect:
719    PAL.push_back(llvm::AttributeWithIndex::get(Index,
720                                                llvm::Attribute::StructRet));
721    ++Index;
722    // sret disables readnone and readonly
723    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
724                   llvm::Attribute::ReadNone);
725    break;
726
727  case ABIArgInfo::Ignore:
728  case ABIArgInfo::Coerce:
729    break;
730
731  case ABIArgInfo::Expand:
732    assert(0 && "Invalid ABI kind for return argument");
733  }
734
735  if (RetAttrs)
736    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
737
738  // FIXME: we need to honour command line settings also...
739  // FIXME: RegParm should be reduced in case of nested functions and/or global
740  // register variable.
741  signed RegParm = FI.getRegParm();
742
743  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
744  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
745         ie = FI.arg_end(); it != ie; ++it) {
746    QualType ParamType = it->type;
747    const ABIArgInfo &AI = it->info;
748    unsigned Attributes = 0;
749
750    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
751    // have the corresponding parameter variable.  It doesn't make
752    // sense to do it here because the parameter handling is too convoluted.
753
754    switch (AI.getKind()) {
755    case ABIArgInfo::Coerce:
756      if (const llvm::StructType *STy =
757          dyn_cast<llvm::StructType>(AI.getCoerceToType()))
758        Index += STy->getNumElements();
759      else
760        ++Index;
761      continue;  // Skip index increment.
762
763    case ABIArgInfo::Indirect:
764      if (AI.getIndirectByVal())
765        Attributes |= llvm::Attribute::ByVal;
766
767      Attributes |=
768        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
769      // byval disables readnone and readonly.
770      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
771                     llvm::Attribute::ReadNone);
772      break;
773
774    case ABIArgInfo::Extend:
775     if (ParamType->isSignedIntegerType()) {
776       Attributes |= llvm::Attribute::SExt;
777     } else if (ParamType->isUnsignedIntegerType()) {
778       Attributes |= llvm::Attribute::ZExt;
779     }
780     // FALLS THROUGH
781    case ABIArgInfo::Direct:
782      if (RegParm > 0 &&
783          (ParamType->isIntegerType() || ParamType->isPointerType())) {
784        RegParm -=
785          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
786        if (RegParm >= 0)
787          Attributes |= llvm::Attribute::InReg;
788      }
789      // FIXME: handle sseregparm someday...
790      break;
791
792    case ABIArgInfo::Ignore:
793      // Skip increment, no matching LLVM parameter.
794      continue;
795
796    case ABIArgInfo::Expand: {
797      std::vector<const llvm::Type*> Tys;
798      // FIXME: This is rather inefficient. Do we ever actually need to do
799      // anything here? The result should be just reconstructed on the other
800      // side, so extension should be a non-issue.
801      getTypes().GetExpandedTypes(ParamType, Tys, false);
802      Index += Tys.size();
803      continue;
804    }
805    }
806
807    if (Attributes)
808      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
809    ++Index;
810  }
811  if (FuncAttrs)
812    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
813}
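// Rough illustration: for a function whose aggregate return is lowered
// indirectly, e.g.
//   struct Big { char buf[128]; };  struct Big g(void);
// the list built here contains an sret attribute on parameter index 1 plus
// any function-level attributes at index ~0; readnone/readonly are dropped
// because the sret store is an observable write.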
814
815void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
816                                         llvm::Function *Fn,
817                                         const FunctionArgList &Args) {
818  // If this is an implicit-return-zero function, go ahead and
819  // initialize the return value.  TODO: it might be nice to have
820  // a more general mechanism for this that didn't require synthesized
821  // return statements.
822  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
823    if (FD->hasImplicitReturnZero()) {
824      QualType RetTy = FD->getResultType().getUnqualifiedType();
825      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
826      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
827      Builder.CreateStore(Zero, ReturnValue);
828    }
829  }
830
831  // FIXME: We no longer need the types from FunctionArgList; lift up and
832  // simplify.
833
834  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
835  llvm::Function::arg_iterator AI = Fn->arg_begin();
836
837  // Name the struct return argument.
838  if (CGM.ReturnTypeUsesSret(FI)) {
839    AI->setName("agg.result");
840    ++AI;
841  }
842
843  assert(FI.arg_size() == Args.size() &&
844         "Mismatch between function signature & arguments.");
845  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
846  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
847       i != e; ++i, ++info_it) {
848    const VarDecl *Arg = i->first;
849    QualType Ty = info_it->type;
850    const ABIArgInfo &ArgI = info_it->info;
851
852    switch (ArgI.getKind()) {
853    case ABIArgInfo::Indirect: {
854      llvm::Value *V = AI;
855      if (hasAggregateLLVMType(Ty)) {
856        // Do nothing, aggregates and complex variables are accessed by
857        // reference.
858      } else {
859        // Load scalar value from indirect argument.
860        V = EmitLoadOfScalar(V, false, Ty);
861        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
862          // This must be a promotion, for something like
863          // "void a(x) short x; {..."
864          V = EmitScalarConversion(V, Ty, Arg->getType());
865        }
866      }
867      EmitParmDecl(*Arg, V);
868      break;
869    }
870
871    case ABIArgInfo::Extend:
872    case ABIArgInfo::Direct: {
873      assert(AI != Fn->arg_end() && "Argument mismatch!");
874      llvm::Value *V = AI;
875      if (hasAggregateLLVMType(Ty)) {
876        // Create a temporary alloca to hold the argument; the rest of
877        // codegen expects to access aggregates & complex values by
878        // reference.
879        V = CreateMemTemp(Ty);
880        Builder.CreateStore(AI, V);
881      } else {
882        if (Arg->getType().isRestrictQualified())
883          AI->addAttr(llvm::Attribute::NoAlias);
884
885        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
886          // This must be a promotion, for something like
887          // "void a(x) short x; {..."
888          V = EmitScalarConversion(V, Ty, Arg->getType());
889        }
890      }
891      EmitParmDecl(*Arg, V);
892      break;
893    }
894
895    case ABIArgInfo::Expand: {
896      // If this structure was expanded into multiple arguments then
897      // we need to create a temporary and reconstruct it from the
898      // arguments.
899      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
900      // FIXME: What are the right qualifiers here?
901      llvm::Function::arg_iterator End =
902        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
903      EmitParmDecl(*Arg, Temp);
904
905      // Name the arguments used in expansion and increment AI.
906      unsigned Index = 0;
907      for (; AI != End; ++AI, ++Index)
908        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
909      continue;
910    }
911
912    case ABIArgInfo::Ignore:
913      // Initialize the local variable appropriately.
914      if (hasAggregateLLVMType(Ty)) {
915        EmitParmDecl(*Arg, CreateMemTemp(Ty));
916      } else {
917        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
918      }
919
920      // Skip increment, no matching LLVM parameter.
921      continue;
922
923    case ABIArgInfo::Coerce: {
924      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
925      // result in a new alloca anyway, so we could just store into that
926      // directly if we broke the abstraction down more.
927      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
928      Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity());
929      llvm::Value *V = Alloca;
930
931      // If the coerce-to type is a first class aggregate, we flatten it and
932      // pass the elements. Either way is semantically identical, but fast-isel
933      // and the optimizer generally like scalar values better than FCAs.
934      if (const llvm::StructType *STy =
935            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
936        // If the argument and alloca types match up, we don't have to build the
937        // FCA at all; instead, emit a series of GEPs and stores, which is better for
938        // fast isel.
939        if (STy == cast<llvm::PointerType>(V->getType())->getElementType()) {
940          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
941            assert(AI != Fn->arg_end() && "Argument mismatch!");
942            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
943            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(V, 0, i);
944            Builder.CreateStore(AI++, EltPtr);
945          }
946        } else {
947          // Reconstruct the FCA here so we can do a coerced store.
948          llvm::Value *FormalArg = llvm::UndefValue::get(STy);
949          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
950            assert(AI != Fn->arg_end() && "Argument mismatch!");
951            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
952            FormalArg = Builder.CreateInsertValue(FormalArg, AI++, i);
953          }
954          CreateCoercedStore(FormalArg, V, /*DestIsVolatile=*/false, *this);
955        }
956      } else {
957        // Simple case, just do a coerced store of the argument into the alloca.
958        assert(AI != Fn->arg_end() && "Argument mismatch!");
959        AI->setName(Arg->getName() + ".coerce");
960        CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
961      }
962
963
964      // Match to what EmitParmDecl is expecting for this type.
965      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
966        V = EmitLoadOfScalar(V, false, Ty);
967        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
968          // This must be a promotion, for something like
969          // "void a(x) short x; {..."
970          V = EmitScalarConversion(V, Ty, Arg->getType());
971        }
972      }
973      EmitParmDecl(*Arg, V);
974      continue;  // Skip ++AI increment, already done.
975    }
976    }
977
978    ++AI;
979  }
980  assert(AI == Fn->arg_end() && "Argument mismatch!");
981}
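// Rough illustration of the Coerce prolog above, for a parameter 's' whose
// coerce-to type is the struct { i64, i64 } and whose alloca has the same
// layout:
//   %coerce = alloca %struct.S
//   store i64 %s.coerce0, i64* <gep %coerce, 0, 0>
//   store i64 %s.coerce1, i64* <gep %coerce, 0, 1>
// i.e. the flattened pieces are stitched back into a single local object
// before EmitParmDecl sees it.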
982
983void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
984  // Functions with no result always return void.
985  if (ReturnValue == 0) {
986    Builder.CreateRetVoid();
987    return;
988  }
989
990  llvm::MDNode *RetDbgInfo = 0;
991  llvm::Value *RV = 0;
992  QualType RetTy = FI.getReturnType();
993  const ABIArgInfo &RetAI = FI.getReturnInfo();
994
995  switch (RetAI.getKind()) {
996  case ABIArgInfo::Indirect:
997    if (RetTy->isAnyComplexType()) {
998      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
999      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1000    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1001      // Do nothing; aggregates get evaluated directly into the destination.
1002    } else {
1003      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1004                        false, RetTy);
1005    }
1006    break;
1007
1008  case ABIArgInfo::Extend:
1009  case ABIArgInfo::Direct: {
1010    // The internal return value temp will always have pointer-to-return-type
1011    // type; just do a load.
1012
1013    // If the instruction right before the insertion point is a store to the
1014    // return value, we can elide the load, zap the store, and usually zap the
1015    // alloca.
1016    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
1017    llvm::StoreInst *SI = 0;
1018    if (InsertBB->empty() ||
1019        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
1020        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
1021      RV = Builder.CreateLoad(ReturnValue);
1022    } else {
1023      // Get the stored value and nuke the now-dead store.
1024      RetDbgInfo = SI->getDbgMetadata();
1025      RV = SI->getValueOperand();
1026      SI->eraseFromParent();
1027
1028      // If that was the only use of the return value, nuke it as well now.
1029      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1030        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1031        ReturnValue = 0;
1032      }
1033    }
1034    break;
1035  }
1036  case ABIArgInfo::Ignore:
1037    break;
1038
1039  case ABIArgInfo::Coerce:
1040    RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
1041    break;
1042
1043  case ABIArgInfo::Expand:
1044    assert(0 && "Invalid ABI kind for return argument");
1045  }
1046
1047  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1048  if (RetDbgInfo)
1049    Ret->setDbgMetadata(RetDbgInfo);
1050}
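// The Direct/Extend case above turns the common pattern
//   store i32 %x, i32* %retval
//   ret i32 <reload of %retval>
// into a plain 'ret i32 %x': the preceding store is deleted and, if nothing
// else uses it, so is the %retval alloca.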
1051
1052RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
1053  // StartFunction converted the ABI-lowered parameter(s) into a
1054  // local alloca.  We need to turn that into an r-value suitable
1055  // for EmitCall.
1056  llvm::Value *Local = GetAddrOfLocalVar(Param);
1057
1058  QualType ArgType = Param->getType();
1059
1060  // For the most part, we just need to load the alloca, except:
1061  // 1) aggregate r-values are actually pointers to temporaries, and
1062  // 2) references to aggregates are pointers directly to the aggregate.
1063  // I don't know why references to non-aggregates are different here.
1064  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
1065    if (hasAggregateLLVMType(RefType->getPointeeType()))
1066      return RValue::getAggregate(Local);
1067
1068    // Locals which are references to scalars are represented
1069    // with allocas holding the pointer.
1070    return RValue::get(Builder.CreateLoad(Local));
1071  }
1072
1073  if (ArgType->isAnyComplexType())
1074    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
1075
1076  if (hasAggregateLLVMType(ArgType))
1077    return RValue::getAggregate(Local);
1078
1079  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
1080}
1081
1082RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
1083  if (ArgType->isReferenceType())
1084    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
1085
1086  return EmitAnyExprToTemp(E);
1087}
1088
1089RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1090                                 llvm::Value *Callee,
1091                                 ReturnValueSlot ReturnValue,
1092                                 const CallArgList &CallArgs,
1093                                 const Decl *TargetDecl,
1094                                 llvm::Instruction **callOrInvoke) {
1095  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1096  llvm::SmallVector<llvm::Value*, 16> Args;
1097
1098  // Handle struct-return functions by passing a pointer to the
1099  // location that we would like to return into.
1100  QualType RetTy = CallInfo.getReturnType();
1101  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1102
1103
1104  // If the call returns a temporary with struct return, create a temporary
1105  // alloca to hold the result, unless one is given to us.
1106  if (CGM.ReturnTypeUsesSret(CallInfo)) {
1107    llvm::Value *Value = ReturnValue.getValue();
1108    if (!Value)
1109      Value = CreateMemTemp(RetTy);
1110    Args.push_back(Value);
1111  }
1112
1113  assert(CallInfo.arg_size() == CallArgs.size() &&
1114         "Mismatch between function signature & arguments.");
1115  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1116  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1117       I != E; ++I, ++info_it) {
1118    const ABIArgInfo &ArgInfo = info_it->info;
1119    RValue RV = I->first;
1120
1121    switch (ArgInfo.getKind()) {
1122    case ABIArgInfo::Indirect:
1123      if (RV.isScalar() || RV.isComplex()) {
1124        // Make a temporary alloca to pass the argument.
1125        Args.push_back(CreateMemTemp(I->second));
1126        if (RV.isScalar())
1127          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
1128        else
1129          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1130      } else {
1131        Args.push_back(RV.getAggregateAddr());
1132      }
1133      break;
1134
1135    case ABIArgInfo::Extend:
1136    case ABIArgInfo::Direct:
1137      if (RV.isScalar()) {
1138        Args.push_back(RV.getScalarVal());
1139      } else if (RV.isComplex()) {
1140        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
1141        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
1142        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
1143        Args.push_back(Tmp);
1144      } else {
1145        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
1146      }
1147      break;
1148
1149    case ABIArgInfo::Ignore:
1150      break;
1151
1152    case ABIArgInfo::Coerce: {
1153      // FIXME: Avoid the conversion through memory if possible.
1154      llvm::Value *SrcPtr;
1155      if (RV.isScalar()) {
1156        SrcPtr = CreateMemTemp(I->second, "coerce");
1157        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
1158      } else if (RV.isComplex()) {
1159        SrcPtr = CreateMemTemp(I->second, "coerce");
1160        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1161      } else
1162        SrcPtr = RV.getAggregateAddr();
1163
1164      // If the coerce-to type is a first class aggregate, we flatten it and
1165      // pass the elements. Either way is semantically identical, but fast-isel
1166      // and the optimizer generally like scalar values better than FCAs.
1167      if (const llvm::StructType *STy =
1168            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
1169        // If the argument and alloca types match up, we don't have to build the
1170        // FCA at all; instead, emit a series of GEPs and loads, which is better for
1171        // fast isel.
1172        if (STy ==cast<llvm::PointerType>(SrcPtr->getType())->getElementType()){
1173          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1174            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
1175            Args.push_back(Builder.CreateLoad(EltPtr));
1176          }
1177        } else {
1178          // Otherwise, do a coerced load of the entire FCA and handle the pieces.
1179          llvm::Value *SrcVal =
1180            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
1181
1182          // Extract the elements of the value to pass in.
1183          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1184            Args.push_back(Builder.CreateExtractValue(SrcVal, i));
1185        }
1186      } else {
1187        // In the simple case, just pass the coerced loaded value.
1188        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1189                                         *this));
1190      }
1191
1192      break;
1193    }
1194
1195    case ABIArgInfo::Expand:
1196      ExpandTypeToArgs(I->second, RV, Args);
1197      break;
1198    }
1199  }
1200
1201  // If the callee is a bitcast of a function to a varargs pointer to function
1202  // type, check to see if we can remove the bitcast.  This handles some cases
1203  // with unprototyped functions.
1204  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
1205    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
1206      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
1207      const llvm::FunctionType *CurFT =
1208        cast<llvm::FunctionType>(CurPT->getElementType());
1209      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
1210
1211      if (CE->getOpcode() == llvm::Instruction::BitCast &&
1212          ActualFT->getReturnType() == CurFT->getReturnType() &&
1213          ActualFT->getNumParams() == CurFT->getNumParams() &&
1214          ActualFT->getNumParams() == Args.size()) {
1215        bool ArgsMatch = true;
1216        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
1217          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
1218            ArgsMatch = false;
1219            break;
1220          }
1221
1222        // Strip the cast if we can get away with it.  This is a nice cleanup,
1223        // but also allows us to inline the function at -O0 if it is marked
1224        // always_inline.
1225        if (ArgsMatch)
1226          Callee = CalleeF;
1227      }
1228    }
1229
1230
1231  llvm::BasicBlock *InvokeDest = getInvokeDest();
1232  unsigned CallingConv;
1233  CodeGen::AttributeListType AttributeList;
1234  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
1235  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
1236                                                   AttributeList.end());
1237
1238  llvm::CallSite CS;
1239  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
1240    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
1241  } else {
1242    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1243    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
1244                              Args.data(), Args.data()+Args.size());
1245    EmitBlock(Cont);
1246  }
1247  if (callOrInvoke)
1248    *callOrInvoke = CS.getInstruction();
1249
1250  CS.setAttributes(Attrs);
1251  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1252
1253  // If the call doesn't return, finish the basic block and clear the
1254  // insertion point; this allows the rest of IRgen to discard
1255  // unreachable code.
1256  if (CS.doesNotReturn()) {
1257    Builder.CreateUnreachable();
1258    Builder.ClearInsertionPoint();
1259
1260    // FIXME: For now, emit a dummy basic block because expr emitters in
1261    // general are not ready to handle emitting expressions at unreachable
1262    // points.
1263    EnsureInsertPoint();
1264
1265    // Return a reasonable RValue.
1266    return GetUndefRValue(RetTy);
1267  }
1268
1269  llvm::Instruction *CI = CS.getInstruction();
1270  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
1271    CI->setName("call");
1272
1273  switch (RetAI.getKind()) {
1274  case ABIArgInfo::Indirect:
1275    if (RetTy->isAnyComplexType())
1276      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1277    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1278      return RValue::getAggregate(Args[0]);
1279    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
1280
1281  case ABIArgInfo::Extend:
1282  case ABIArgInfo::Direct:
1283    if (RetTy->isAnyComplexType()) {
1284      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
1285      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
1286      return RValue::getComplex(std::make_pair(Real, Imag));
1287    }
1288    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1289      llvm::Value *DestPtr = ReturnValue.getValue();
1290      bool DestIsVolatile = ReturnValue.isVolatile();
1291
1292      if (!DestPtr) {
1293        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
1294        DestIsVolatile = false;
1295      }
1296      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
1297      return RValue::getAggregate(DestPtr);
1298    }
1299    return RValue::get(CI);
1300
1301  case ABIArgInfo::Ignore:
1302    // Even though the return value is being ignored, make sure to
1303    // construct an appropriate return value for our caller.
1304    return GetUndefRValue(RetTy);
1305
1306  case ABIArgInfo::Coerce: {
1307    llvm::Value *DestPtr = ReturnValue.getValue();
1308    bool DestIsVolatile = ReturnValue.isVolatile();
1309
1310    if (!DestPtr) {
1311      DestPtr = CreateMemTemp(RetTy, "coerce");
1312      DestIsVolatile = false;
1313    }
1314
1315    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
1316    if (RetTy->isAnyComplexType())
1317      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
1318    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1319      return RValue::getAggregate(DestPtr);
1320    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
1321  }
1322
1323  case ABIArgInfo::Expand:
1324    assert(0 && "Invalid ABI kind for return argument");
1325  }
1326
1327  assert(0 && "Unhandled ABIArgInfo::Kind");
1328  return RValue::get(0);
1329}
1330
1331/* VarArg handling */
1332
1333llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1334  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1335}
1336