CGCall.cpp revision 8640cd6bf077e007fdb9bc8c9c5e319f7d70da96
//===--- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo());
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys,
                             FTP->getExtInfo());
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  return CC_C;
}
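
// For illustration (hypothetical declaration): given
//   void __attribute__((stdcall)) f(void);
// getCallingConventionForDecl returns CC_X86StdCall, which
// ClangCallConvToLLVMCallConv maps to llvm::CallingConv::X86_StdCall.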

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}
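
// For illustration (hypothetical class): for
//   struct A : virtual B { A(); };
// the base-object constructor (Ctor_Base) is lowered roughly as
//   void @A_base(%struct.A* %this, i8** %vtt)
// i.e. the VTT pointer is passed immediately after 'this'.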

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}
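
// For illustration (hypothetical method): an Objective-C method
//   - (int)add:(int)x to:(int)y;
// is lowered with the implicit 'self' and '_cmd' parameters first, i.e.
// roughly i32 (id self, SEL _cmd, i32 x, i32 y).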

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // ABI lowering wants to know what our preferred type for the argument is in
  // various situations; pass it in.
  llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
       I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    PreferredArgTypes.push_back(ConvertType(*I));

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
                           PreferredArgTypes.data(), PreferredArgTypes.size());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const llvm::SmallVectorImpl<CanQualType> &ArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = ArgTys.size();

  // FIXME: Co-allocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
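
// For illustration (hypothetical type): expanding
//   struct P { int n; struct { float x, y; } pt; };
// appends i32, float, float to ArgTys, recursing through the nested struct.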

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
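
// For illustration (hypothetical IR types): with SrcPtr of type
//   { { i32, i32 }, i8 }*
// and DstSize = 8, this dives one level (the first element's 8 bytes cover
// DstSize) and returns a { i32, i32 }*; it stops there because diving further
// would enter an i32, whose 4 bytes are smaller than DstSize.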

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified Ty, where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
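
// For illustration: coercing an i32 value to i64 emits a zero extension
// ("coerce.val.ii"); coercing an i8* value to i32 on a 32-bit target emits a
// ptrtoint ("coerce.val.pi") and, if the widths differ, an int cast as well.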
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}
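
// For illustration (hypothetical IR): loading a two-field struct as an i64
// coerced value,
//   %0 = bitcast { i32, i32 }* %src to i64*
//   %1 = load i64* %0, align 1
// since SrcSize (8) >= DstSize (8), the load goes through a simple bitcast.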

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
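
// For illustration: the common case mirrors CreateCoercedLoad, e.g. storing
// an i64 coerced value into a { i32, i32 }* destination bitcasts the
// destination pointer to i64* and stores through it with alignment 1.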

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
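      // e.g. (illustrative) a coerce-to type of { i64, double } contributes
      // two parameters, i64 and double, rather than one FCA.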
      const llvm::Type *ArgTy = AI.getCoerceToType();
      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          ArgTys.push_back(STy->getElementType(i));
      } else {
        ArgTys.push_back(ArgTy);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
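
// For illustration (hypothetical source): for
//   struct Big { int a[10]; };
//   Big f(int x);
// with an indirect return, GetFunctionType produces roughly
//   void (%struct.Big*, i32)
// i.e. the sret pointer becomes the first IR parameter.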

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->isSignedIntegerType()) {
      RetAttrs |= llvm::Attribute::SExt;
    } else if (RetTy->isUnsignedIntegerType()) {
      RetAttrs |= llvm::Attribute::ZExt;
    }
    // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we should also honor command-line settings here.
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variables.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because the parameter representation is too awkward.

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      if (const llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements();
      else
        ++Index;
      continue;  // Skip index increment.

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType()) {
        Attributes |= llvm::Attribute::SExt;
      } else if (ParamType->isUnsignedIntegerType()) {
        Attributes |= llvm::Attribute::ZExt;
      }
      // FALLTHROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
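      // e.g. (illustrative) with __attribute__((regparm(3))) on a 32-bit
      // target, a 64-bit integer parameter consumes two of the three register
      // slots before 'inreg' is applied.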
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing; aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        // If the argument and alloca types match up, we don't have to build
        // the FCA at all; instead we emit a series of GEPs and stores, which
        // is better for fast isel.
        if (STy == cast<llvm::PointerType>(V->getType())->getElementType()) {
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(V, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          // Reconstruct the FCA here so we can do a coerced store.
          llvm::Value *FormalArg = llvm::UndefValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
            FormalArg = Builder.CreateInsertValue(FormalArg, AI++, i);
          }
          CreateCoercedStore(FormalArg, V, /*DestIsVolatile=*/false, *this);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
      }

944      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
945        V = EmitLoadOfScalar(V, false, Ty);
946        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
947          // This must be a promotion, for something like
948          // "void a(x) short x; {..."
949          V = EmitScalarConversion(V, Ty, Arg->getType());
950        }
951      }
952      EmitParmDecl(*Arg, V);
953      continue;  // Skip ++AI increment, already done.
954    }
955    }
956
957    ++AI;
958  }
959  assert(AI == Fn->arg_end() && "Argument mismatch!");
960}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, RetTy);
    }
    break;

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    // The internal return value temp will always have pointer-to-return-type
    // type; just do a load.

    // If the instruction right before the insertion point is a store to the
    // return value, we can elide the load, zap the store, and usually zap the
    // alloca.
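    //
    // e.g. (illustrative IR): if the block ends in
    //   store i32 %x, i32* %retval
    // we emit 'ret i32 %x' directly, deleting the store and, if it becomes
    // unused, the %retval alloca itself.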
    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
    llvm::StoreInst *SI = 0;
    if (InsertBB->empty() ||
        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
      RV = Builder.CreateLoad(ReturnValue);
    } else {
      // Get the stored value and nuke the now-dead store.
      RV = SI->getValueOperand();
      SI->eraseFromParent();

      // If that was the only use of the return value, nuke it as well now.
      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
        ReturnValue = 0;
      }
    }
    break;
  }
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Coerce:
    RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RV)
    Builder.CreateRet(RV);
  else
    Builder.CreateRetVoid();
}

RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RefType->getPointeeType()))
      return RValue::getAggregate(Local);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Local));
  }

  if (ArgType->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));

  if (hasAggregateLLVMType(ArgType))
    return RValue::getAggregate(Local);

  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);

  return EmitAnyExprToTemp(E);
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        // If the argument and alloca types match up, we don't have to build
        // the FCA at all; instead we emit a series of GEPs and loads, which
        // is better for fast isel.
        if (STy ==
              cast<llvm::PointerType>(SrcPtr->getType())->getElementType()) {
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
            Args.push_back(Builder.CreateLoad(EltPtr));
          }
        } else {
          // Otherwise, do a coerced load of the entire FCA and handle the
          // pieces.
          llvm::Value *SrcVal =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);

          // Extract the elements of the value to pass in.
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
            Args.push_back(Builder.CreateExtractValue(SrcVal, i));
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
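  // e.g. (illustrative) a K&R-style call through
  //   bitcast (void (i32)* @f to void (...)*)
  // with a single i32 argument can be rewritten to call @f directly.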
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke) {
    *callOrInvoke = CS.getInstruction();
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}