CGCall.cpp revision 309c59f6d3a4fd883fdf87334271df2c55338aae
1//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliance.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "CodeGenFunction.h"
17#include "CodeGenModule.h"
18#include "clang/Basic/TargetInfo.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/Frontend/CodeGenOptions.h"
23#include "llvm/Attributes.h"
24#include "llvm/Support/CallSite.h"
25#include "llvm/Target/TargetData.h"
26
27#include "ABIInfo.h"
28
29using namespace clang;
30using namespace CodeGen;
31
32/***/
33
34// FIXME: Use iterator and sidestep silly type array creation.
35
36static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37  switch (CC) {
38  default: return llvm::CallingConv::C;
39  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
42  }
43}
44
45/// Derives the 'this' type for codegen purposes, i.e. ignoring method
46/// qualification.
47/// FIXME: address space qualification?
48static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
49  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
50  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
51}
52
53/// Returns the canonical formal type of the given C++ method.
54static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
55  return MD->getType()->getCanonicalTypeUnqualified()
56           .getAs<FunctionProtoType>();
57}
58
59/// Returns the "extra-canonicalized" return type, which discards
60/// qualifiers on the return type.  Codegen doesn't care about them,
61// and it makes the ABI code a little simpler if it can assume that
62// all parameter and return types are top-level unqualified.
63static CanQualType GetReturnType(QualType RetTy) {
64  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
65}
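
// Illustrative note (not part of the original source): with this helper, a
// declared return type such as `const int` is lowered exactly like plain
// `int`; the top-level qualifier is irrelevant to the ABI.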
66
67const CGFunctionInfo &
68CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
69  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
70                         llvm::SmallVector<CanQualType, 16>(),
71                         FTNP->getExtInfo());
72}
73
74/// \param ArgTys - contains any initial argument types besides those
75///   in the formal type.
76static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
77                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
78                                             CanQual<FunctionProtoType> FTP) {
79  // FIXME: Kill copy.
80  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
81    ArgTys.push_back(FTP->getArgType(i));
82  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
83  return CGT.getFunctionInfo(ResTy, ArgTys,
84                             FTP->getExtInfo());
85}
86
87const CGFunctionInfo &
88CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
89  llvm::SmallVector<CanQualType, 16> ArgTys;
90  return ::getFunctionInfo(*this, ArgTys, FTP);
91}
92
93static CallingConv getCallingConventionForDecl(const Decl *D) {
94  // Set the appropriate calling convention for the Function.
95  if (D->hasAttr<StdCallAttr>())
96    return CC_X86StdCall;
97
98  if (D->hasAttr<FastCallAttr>())
99    return CC_X86FastCall;
100
101  if (D->hasAttr<ThisCallAttr>())
102    return CC_X86ThisCall;
103
104  return CC_C;
105}
106
107const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
108                                                 const FunctionProtoType *FTP) {
109  llvm::SmallVector<CanQualType, 16> ArgTys;
110
111  // Add the 'this' pointer.
112  ArgTys.push_back(GetThisType(Context, RD));
113
114  return ::getFunctionInfo(*this, ArgTys,
115              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
116}
117
118const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
119  llvm::SmallVector<CanQualType, 16> ArgTys;
120
121  // Add the 'this' pointer unless this is a static method.
122  if (MD->isInstance())
123    ArgTys.push_back(GetThisType(Context, MD->getParent()));
124
125  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
126}
127
128const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
129                                                    CXXCtorType Type) {
130  llvm::SmallVector<CanQualType, 16> ArgTys;
131
132  // Add the 'this' pointer.
133  ArgTys.push_back(GetThisType(Context, D->getParent()));
134
135  // Check if we need to add a VTT parameter (which has type void **).
136  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
137    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
138
139  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
140}
141
142const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
143                                                    CXXDtorType Type) {
144  llvm::SmallVector<CanQualType, 16> ArgTys;
145
146  // Add the 'this' pointer.
147  ArgTys.push_back(GetThisType(Context, D->getParent()));
148
149  // Check if we need to add a VTT parameter (which has type void **).
150  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
151    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
152
153  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
154}
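
// Illustrative sketch (hypothetical classes, not part of the original
// source): for
//
//   struct A { virtual ~A(); };
//   struct B : virtual A { B(); ~B(); };
//
// the base-object constructor and destructor variants built above get the
// argument list (B *this, void **VTT), while the complete-object variants
// get just (B *this); only the Ctor_Base/Dtor_Base cases add the VTT.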
155
156const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
157  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
158    if (MD->isInstance())
159      return getFunctionInfo(MD);
160
161  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
162  assert(isa<FunctionType>(FTy));
163  if (isa<FunctionNoProtoType>(FTy))
164    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
165  assert(isa<FunctionProtoType>(FTy));
166  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
167}
168
169const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
170  llvm::SmallVector<CanQualType, 16> ArgTys;
171  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
172  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
173  // FIXME: Kill copy?
174  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
175         e = MD->param_end(); i != e; ++i) {
176    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
177  }
178  return getFunctionInfo(GetReturnType(MD->getResultType()),
179                         ArgTys,
180                         FunctionType::ExtInfo(
181                             /*NoReturn*/ false,
182                             /*RegParm*/ 0,
183                             getCallingConventionForDecl(MD)));
184}
185
186const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
187  // FIXME: Do we need to handle ObjCMethodDecl?
188  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
189
190  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
191    return getFunctionInfo(CD, GD.getCtorType());
192
193  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
194    return getFunctionInfo(DD, GD.getDtorType());
195
196  return getFunctionInfo(FD);
197}
198
199const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
200                                                    const CallArgList &Args,
201                                            const FunctionType::ExtInfo &Info) {
202  // FIXME: Kill copy.
203  llvm::SmallVector<CanQualType, 16> ArgTys;
204  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
205       i != e; ++i)
206    ArgTys.push_back(Context.getCanonicalParamType(i->second));
207  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
208}
209
210const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
211                                                    const FunctionArgList &Args,
212                                            const FunctionType::ExtInfo &Info) {
213  // FIXME: Kill copy.
214  llvm::SmallVector<CanQualType, 16> ArgTys;
215  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
216       i != e; ++i)
217    ArgTys.push_back(Context.getCanonicalParamType(i->second));
218  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
219}
220
221const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
222                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
223                                            const FunctionType::ExtInfo &Info) {
224#ifndef NDEBUG
225  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
226         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
227    assert(I->isCanonicalAsParam());
228#endif
229
230  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
231
232  // Look up or create unique function info.
233  llvm::FoldingSetNodeID ID;
234  CGFunctionInfo::Profile(ID, Info, ResTy,
235                          ArgTys.begin(), ArgTys.end());
236
237  void *InsertPos = 0;
238  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
239  if (FI)
240    return *FI;
241
242  // Construct the function info.
243  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
244                          ArgTys);
245  FunctionInfos.InsertNode(FI, InsertPos);
246
247  // Compute ABI information.
248  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
249
250  return *FI;
251}
252
253CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
254                               bool _NoReturn,
255                               unsigned _RegParm,
256                               CanQualType ResTy,
257                               const llvm::SmallVectorImpl<CanQualType> &ArgTys)
258  : CallingConvention(_CallingConvention),
259    EffectiveCallingConvention(_CallingConvention),
260    NoReturn(_NoReturn), RegParm(_RegParm)
261{
262  NumArgs = ArgTys.size();
263
264  // FIXME: Co-allocate with the CGFunctionInfo object.
265  Args = new ArgInfo[1 + NumArgs];
266  Args[0].type = ResTy;
267  for (unsigned i = 0; i < NumArgs; ++i)
268    Args[1 + i].type = ArgTys[i];
269}
270
271/***/
272
273void CodeGenTypes::GetExpandedTypes(QualType Ty,
274                                    std::vector<const llvm::Type*> &ArgTys) {
275  const RecordType *RT = Ty->getAsStructureType();
276  assert(RT && "Can only expand structure types.");
277  const RecordDecl *RD = RT->getDecl();
278  assert(!RD->hasFlexibleArrayMember() &&
279         "Cannot expand structure with flexible array.");
280
281  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
282         i != e; ++i) {
283    const FieldDecl *FD = *i;
284    assert(!FD->isBitField() &&
285           "Cannot expand structure with bit-field members.");
286
287    QualType FT = FD->getType();
288    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
289      GetExpandedTypes(FT, ArgTys);
290    } else {
291      ArgTys.push_back(ConvertType(FT));
292    }
293  }
294}
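
// Illustrative sketch (hypothetical types, not part of the original source):
// expanding
//
//   struct Inner { float x, y; };
//   struct Outer { int a; struct Inner b; };
//
// appends the flattened leaf types, roughly { i32, float, float }, to
// ArgTys; structs containing bit-fields or flexible array members are
// rejected by the asserts above.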
295
296llvm::Function::arg_iterator
297CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
298                                    llvm::Function::arg_iterator AI) {
299  const RecordType *RT = Ty->getAsStructureType();
300  assert(RT && "Can only expand structure types.");
301
302  RecordDecl *RD = RT->getDecl();
303  assert(LV.isSimple() &&
304         "Unexpected non-simple lvalue during struct expansion.");
305  llvm::Value *Addr = LV.getAddress();
306  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
307         i != e; ++i) {
308    FieldDecl *FD = *i;
309    QualType FT = FD->getType();
310
311    // FIXME: What are the right qualifiers here?
312    LValue LV = EmitLValueForField(Addr, FD, 0);
313    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
314      AI = ExpandTypeFromArgs(FT, LV, AI);
315    } else {
316      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
317      ++AI;
318    }
319  }
320
321  return AI;
322}
323
324void
325CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
326                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
327  const RecordType *RT = Ty->getAsStructureType();
328  assert(RT && "Can only expand structure types.");
329
330  RecordDecl *RD = RT->getDecl();
331  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
332  llvm::Value *Addr = RV.getAggregateAddr();
333  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
334         i != e; ++i) {
335    FieldDecl *FD = *i;
336    QualType FT = FD->getType();
337
338    // FIXME: What are the right qualifiers here?
339    LValue LV = EmitLValueForField(Addr, FD, 0);
340    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
341      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
342    } else {
343      RValue RV = EmitLoadOfLValue(LV, FT);
344      assert(RV.isScalar() &&
345             "Unexpected non-scalar rvalue during struct expansion.");
346      Args.push_back(RV.getScalarVal());
347    }
348  }
349}
350
351/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
352/// accessing some number of bytes out of, try to GEP into the struct to reach
353/// the element we actually need.  Dive as deep as possible without entering an
354/// element with an in-memory size smaller than DstSize.
355static llvm::Value *
356EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
357                                   const llvm::StructType *SrcSTy,
358                                   uint64_t DstSize, CodeGenFunction &CGF) {
359  // We can't dive into a zero-element struct.
360  if (SrcSTy->getNumElements() == 0) return SrcPtr;
361
362  const llvm::Type *FirstElt = SrcSTy->getElementType(0);
363
364  // If the first elt is at least as large as what we're looking for, or if the
365  // first element is the same size as the whole struct, we can enter it.
366  uint64_t FirstEltSize =
367    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
368  if (FirstEltSize < DstSize &&
369      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
370    return SrcPtr;
371
372  // GEP into the first element.
373  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
374
375  // If the first element is a struct, recurse.
376  const llvm::Type *SrcTy =
377    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
378  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
379    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
380
381  return SrcPtr;
382}
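
// Illustrative sketch (hypothetical types): given a SrcPtr of type
// { { i32, i32 } }* and DstSize == 8, the code above emits
//   %coerce.dive = getelementptr ..., i32 0, i32 0
// once, yielding an { i32, i32 }* (the first element covers the whole
// request), and then stops rather than entering the leading i32, whose
// 4-byte size is smaller than DstSize.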
383
384/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
385/// are either integers or pointers.  This does a truncation of the value if it
386/// is too large or a zero extension if it is too small.
387static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
388                                             const llvm::Type *Ty,
389                                             CodeGenFunction &CGF) {
390  if (Val->getType() == Ty)
391    return Val;
392
393  if (isa<llvm::PointerType>(Val->getType())) {
394    // If this is Pointer->Pointer avoid conversion to and from int.
395    if (isa<llvm::PointerType>(Ty))
396      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
397
398    // Convert the pointer to an integer so we can play with its width.
399    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
400  }
401
402  const llvm::Type *DestIntTy = Ty;
403  if (isa<llvm::PointerType>(DestIntTy))
404    DestIntTy = CGF.IntPtrTy;
405
406  if (Val->getType() != DestIntTy)
407    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
408
409  if (isa<llvm::PointerType>(Ty))
410    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
411  return Val;
412}
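
// Illustrative sketch (hypothetical values, assuming a target where IntPtrTy
// is i64): coercing an i8* value %p to i32 emits roughly
//   %pi = ptrtoint i8* %p to i64      ; "coerce.val.pi"
//   %ii = trunc i64 %pi to i32        ; "coerce.val.ii"
// and the reverse direction (i32 to i8*) emits a zext to i64 followed by an
// inttoptr, since CreateIntCast above is always called with isSigned=false.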
413
414
415
416/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
417/// a pointer to an object of type \arg Ty.
418///
419/// This safely handles the case when the src type is smaller than the
420/// destination type; in this situation the values of bits which are not
421/// present in the src are undefined.
422static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
423                                      const llvm::Type *Ty,
424                                      CodeGenFunction &CGF) {
425  const llvm::Type *SrcTy =
426    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
427
428  // If SrcTy and Ty are the same, just do a load.
429  if (SrcTy == Ty)
430    return CGF.Builder.CreateLoad(SrcPtr);
431
432  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
433
434  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
435    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
436    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
437  }
438
439  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
440
441  // If the source and destination are integer or pointer types, just do an
442  // extension or truncation to the desired type.
443  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
444      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
445    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
446    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
447  }
448
449  // If load is legal, just bitcast the src pointer.
450  if (SrcSize >= DstSize) {
451    // Generally SrcSize is never greater than DstSize, since this means we are
452    // losing bits. However, this can happen in cases where the structure has
453    // additional padding, for example due to a user specified alignment.
454    //
455    // FIXME: Assert that we aren't truncating non-padding bits when we have
456    // access to that information.
457    llvm::Value *Casted =
458      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
459    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
460    // FIXME: Use better alignment / avoid requiring aligned load.
461    Load->setAlignment(1);
462    return Load;
463  }
464
465  // Otherwise do the coercion through memory.  This is inefficient, but
466  // simple.
467  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
468  llvm::Value *Casted =
469    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
470  llvm::StoreInst *Store =
471    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
472  // FIXME: Use better alignment / avoid requiring aligned store.
473  Store->setAlignment(1);
474  return CGF.Builder.CreateLoad(Tmp);
475}
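
// Illustrative sketch (hypothetical types, old-style IR syntax, assuming the
// struct-dive above does not fire): loading an { i64, i64 } coerced value
// from a 12-byte %struct.S* takes the coercion-through-memory path, roughly
//   %tmp    = alloca { i64, i64 }
//   %casted = bitcast { i64, i64 }* %tmp to %struct.S*
//   %v      = load %struct.S* %src
//   store %struct.S %v, %struct.S* %casted   ; align 1
//   %res    = load { i64, i64 }* %tmp
// leaving the bytes that were never present in the source undefined.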
476
477/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
478/// where the source and destination may have different types.
479///
480/// This safely handles the case when the src type is larger than the
481/// destination type; the upper bits of the src will be lost.
482static void CreateCoercedStore(llvm::Value *Src,
483                               llvm::Value *DstPtr,
484                               bool DstIsVolatile,
485                               CodeGenFunction &CGF) {
486  const llvm::Type *SrcTy = Src->getType();
487  const llvm::Type *DstTy =
488    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
489  if (SrcTy == DstTy) {
490    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
491    return;
492  }
493
494  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
495
496  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
497    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
498    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
499  }
500
501  // If the source and destination are integer or pointer types, just do an
502  // extension or truncation to the desired type.
503  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
504      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
505    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
506    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
507    return;
508  }
509
510  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
511
512  // If store is legal, just bitcast the src pointer.
513  if (SrcSize <= DstSize) {
514    llvm::Value *Casted =
515      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
516    // FIXME: Use better alignment / avoid requiring aligned store.
517    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
518  } else {
519    // Otherwise do the coercion through memory.  This is inefficient, but
520    // simple.
521
522    // Generally SrcSize is never greater than DstSize, since this means we are
523    // losing bits. However, this can happen in cases where the structure has
524    // additional padding, for example due to a user specified alignment.
525    //
526    // FIXME: Assert that we aren't truncating non-padding bits when we have
527    // access to that information.
528    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
529    CGF.Builder.CreateStore(Src, Tmp);
530    llvm::Value *Casted =
531      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
532    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
533    // FIXME: Use better alignment / avoid requiring aligned load.
534    Load->setAlignment(1);
535    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
536  }
537}
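
// Illustrative sketch (hypothetical types): storing an i64 Src into a
// %struct.T* where T is { i32, i32 } takes the bitcast path above, since
// SrcSize (8) <= DstSize (8): the destination pointer is bitcast to i64* and
// the value is stored with alignment 1.  Only when the source is strictly
// larger than the destination does the store-to-temporary-and-reload
// fallback run.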
538
539/***/
540
541bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
542  return FI.getReturnInfo().isIndirect();
543}
544
545const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
546  const CGFunctionInfo &FI = getFunctionInfo(GD);
547
548  // For definition purposes, don't consider a K&R function variadic.
549  bool Variadic = false;
550  if (const FunctionProtoType *FPT =
551        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
552    Variadic = FPT->isVariadic();
553
554  return GetFunctionType(FI, Variadic);
555}
556
557const llvm::FunctionType *
558CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
559  std::vector<const llvm::Type*> ArgTys;
560
561  const llvm::Type *ResultType = 0;
562
563  QualType RetTy = FI.getReturnType();
564  const ABIArgInfo &RetAI = FI.getReturnInfo();
565  switch (RetAI.getKind()) {
566  case ABIArgInfo::Expand:
567    assert(0 && "Invalid ABI kind for return argument");
568
569  case ABIArgInfo::Extend:
570  case ABIArgInfo::Direct:
571    ResultType = ConvertType(RetTy);
572    break;
573
574  case ABIArgInfo::Indirect: {
575    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
576    ResultType = llvm::Type::getVoidTy(getLLVMContext());
577    const llvm::Type *STy = ConvertType(RetTy);
578    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
579    break;
580  }
581
582  case ABIArgInfo::Ignore:
583    ResultType = llvm::Type::getVoidTy(getLLVMContext());
584    break;
585
586  case ABIArgInfo::Coerce:
587    ResultType = RetAI.getCoerceToType();
588    break;
589  }
590
591  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
592         ie = FI.arg_end(); it != ie; ++it) {
593    const ABIArgInfo &AI = it->info;
594
595    switch (AI.getKind()) {
596    case ABIArgInfo::Ignore:
597      break;
598
599    case ABIArgInfo::Coerce: {
600      // If the coerce-to type is a first class aggregate, flatten it.  Either
601      // way is semantically identical, but fast-isel and the optimizer
602      // generally like scalar values better than FCAs.
603      const llvm::Type *ArgTy = AI.getCoerceToType();
604      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
605        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
606          ArgTys.push_back(STy->getElementType(i));
607      } else {
608        ArgTys.push_back(ArgTy);
609      }
610      break;
611    }
612
613    case ABIArgInfo::Indirect: {
614      // Indirect arguments are always on the stack, which is addr space #0.
615      const llvm::Type *LTy = ConvertTypeForMem(it->type);
616      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
617      break;
618    }
619
620    case ABIArgInfo::Extend:
621    case ABIArgInfo::Direct:
622      ArgTys.push_back(ConvertType(it->type));
623      break;
624
625    case ABIArgInfo::Expand:
626      GetExpandedTypes(it->type, ArgTys);
627      break;
628    }
629  }
630
631  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
632}
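
// Illustrative sketch (hypothetical signature): for
//
//   struct S { int a[5]; };
//   struct S f(int x);
//
// on a target where the return is classified ABIArgInfo::Indirect, the code
// above builds an LLVM type along the lines of
//   void (%struct.S*, i32)
// i.e. a void result plus a leading sret pointer argument, whereas a return
// coerced to { i64, i64 } would simply become the function's result type.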
633
634const llvm::Type *
635CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
636  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
637
638  if (!VerifyFuncTypeComplete(FPT))
639    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
640
641  return llvm::OpaqueType::get(getLLVMContext());
642}
643
644void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
645                                           const Decl *TargetDecl,
646                                           AttributeListType &PAL,
647                                           unsigned &CallingConv) {
648  unsigned FuncAttrs = 0;
649  unsigned RetAttrs = 0;
650
651  CallingConv = FI.getEffectiveCallingConvention();
652
653  if (FI.isNoReturn())
654    FuncAttrs |= llvm::Attribute::NoReturn;
655
656  // FIXME: handle sseregparm someday...
657  if (TargetDecl) {
658    if (TargetDecl->hasAttr<NoThrowAttr>())
659      FuncAttrs |= llvm::Attribute::NoUnwind;
660    if (TargetDecl->hasAttr<NoReturnAttr>())
661      FuncAttrs |= llvm::Attribute::NoReturn;
662    if (TargetDecl->hasAttr<ConstAttr>())
663      FuncAttrs |= llvm::Attribute::ReadNone;
664    else if (TargetDecl->hasAttr<PureAttr>())
665      FuncAttrs |= llvm::Attribute::ReadOnly;
666    if (TargetDecl->hasAttr<MallocAttr>())
667      RetAttrs |= llvm::Attribute::NoAlias;
668  }
669
670  if (CodeGenOpts.OptimizeSize)
671    FuncAttrs |= llvm::Attribute::OptimizeForSize;
672  if (CodeGenOpts.DisableRedZone)
673    FuncAttrs |= llvm::Attribute::NoRedZone;
674  if (CodeGenOpts.NoImplicitFloat)
675    FuncAttrs |= llvm::Attribute::NoImplicitFloat;
676
677  QualType RetTy = FI.getReturnType();
678  unsigned Index = 1;
679  const ABIArgInfo &RetAI = FI.getReturnInfo();
680  switch (RetAI.getKind()) {
681  case ABIArgInfo::Extend:
682   if (RetTy->isSignedIntegerType()) {
683     RetAttrs |= llvm::Attribute::SExt;
684   } else if (RetTy->isUnsignedIntegerType()) {
685     RetAttrs |= llvm::Attribute::ZExt;
686   }
687   // FALLTHROUGH
688  case ABIArgInfo::Direct:
689    break;
690
691  case ABIArgInfo::Indirect:
692    PAL.push_back(llvm::AttributeWithIndex::get(Index,
693                                                llvm::Attribute::StructRet));
694    ++Index;
695    // sret disables readnone and readonly
696    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
697                   llvm::Attribute::ReadNone);
698    break;
699
700  case ABIArgInfo::Ignore:
701  case ABIArgInfo::Coerce:
702    break;
703
704  case ABIArgInfo::Expand:
705    assert(0 && "Invalid ABI kind for return argument");
706  }
707
708  if (RetAttrs)
709    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
710
711  // FIXME: we need to honour command line settings also...
712  // FIXME: RegParm should be reduced in the case of nested functions and/or
713  // global register variables.
714  signed RegParm = FI.getRegParm();
715
716  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
717  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
718         ie = FI.arg_end(); it != ie; ++it) {
719    QualType ParamType = it->type;
720    const ABIArgInfo &AI = it->info;
721    unsigned Attributes = 0;
722
723    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
724    // have the corresponding parameter variable.  It doesn't make
725    // sense to do it here because the parameter handling is such a mess.
726
727    switch (AI.getKind()) {
728    case ABIArgInfo::Coerce:
729      if (const llvm::StructType *STy =
730          dyn_cast<llvm::StructType>(AI.getCoerceToType()))
731        Index += STy->getNumElements();
732      else
733        ++Index;
734      continue;  // Skip index increment.
735
736    case ABIArgInfo::Indirect:
737      if (AI.getIndirectByVal())
738        Attributes |= llvm::Attribute::ByVal;
739
740      Attributes |=
741        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
742      // byval disables readnone and readonly.
743      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
744                     llvm::Attribute::ReadNone);
745      break;
746
747    case ABIArgInfo::Extend:
748     if (ParamType->isSignedIntegerType()) {
749       Attributes |= llvm::Attribute::SExt;
750     } else if (ParamType->isUnsignedIntegerType()) {
751       Attributes |= llvm::Attribute::ZExt;
752     }
753     // FALLS THROUGH
754    case ABIArgInfo::Direct:
755      if (RegParm > 0 &&
756          (ParamType->isIntegerType() || ParamType->isPointerType())) {
757        RegParm -=
758          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
759        if (RegParm >= 0)
760          Attributes |= llvm::Attribute::InReg;
761      }
762      // FIXME: handle sseregparm someday...
763      break;
764
765    case ABIArgInfo::Ignore:
766      // Skip increment, no matching LLVM parameter.
767      continue;
768
769    case ABIArgInfo::Expand: {
770      std::vector<const llvm::Type*> Tys;
771      // FIXME: This is rather inefficient. Do we ever actually need to do
772      // anything here? The result should be just reconstructed on the other
773      // side, so extension should be a non-issue.
774      getTypes().GetExpandedTypes(ParamType, Tys);
775      Index += Tys.size();
776      continue;
777    }
778    }
779
780    if (Attributes)
781      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
782    ++Index;
783  }
784  if (FuncAttrs)
785    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
786}
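
// Illustrative sketch (hypothetical declaration): for
//
//   __attribute__((noreturn)) short g(struct S s);
//
// where the return is classified Extend and s is passed Indirect with byval,
// the list built above contains roughly
//   index 0  (return): signext
//   index 1  (s):      byval + the alignment attribute
//   index ~0 (func):   noreturn
// and any readnone/readonly bits are cleared whenever an sret or byval
// argument is present.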
787
788void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
789                                         llvm::Function *Fn,
790                                         const FunctionArgList &Args) {
791  // If this is an implicit-return-zero function, go ahead and
792  // initialize the return value.  TODO: it might be nice to have
793  // a more general mechanism for this that didn't require synthesized
794  // return statements.
795  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
796    if (FD->hasImplicitReturnZero()) {
797      QualType RetTy = FD->getResultType().getUnqualifiedType();
798      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
799      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
800      Builder.CreateStore(Zero, ReturnValue);
801    }
802  }
803
804  // FIXME: We no longer need the types from FunctionArgList; lift up and
805  // simplify.
806
807  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
808  llvm::Function::arg_iterator AI = Fn->arg_begin();
809
810  // Name the struct return argument.
811  if (CGM.ReturnTypeUsesSret(FI)) {
812    AI->setName("agg.result");
813    ++AI;
814  }
815
816  assert(FI.arg_size() == Args.size() &&
817         "Mismatch between function signature & arguments.");
818  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
819  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
820       i != e; ++i, ++info_it) {
821    const VarDecl *Arg = i->first;
822    QualType Ty = info_it->type;
823    const ABIArgInfo &ArgI = info_it->info;
824
825    switch (ArgI.getKind()) {
826    case ABIArgInfo::Indirect: {
827      llvm::Value *V = AI;
828      if (hasAggregateLLVMType(Ty)) {
829        // Do nothing, aggregates and complex variables are accessed by
830        // reference.
831      } else {
832        // Load scalar value from indirect argument.
833        V = EmitLoadOfScalar(V, false, Ty);
834        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
835          // This must be a promotion, for something like
836          // "void a(x) short x; {..."
837          V = EmitScalarConversion(V, Ty, Arg->getType());
838        }
839      }
840      EmitParmDecl(*Arg, V);
841      break;
842    }
843
844    case ABIArgInfo::Extend:
845    case ABIArgInfo::Direct: {
846      assert(AI != Fn->arg_end() && "Argument mismatch!");
847      llvm::Value *V = AI;
848      if (hasAggregateLLVMType(Ty)) {
849        // Create a temporary alloca to hold the argument; the rest of
850        // codegen expects to access aggregates & complex values by
851        // reference.
852        V = CreateMemTemp(Ty);
853        Builder.CreateStore(AI, V);
854      } else {
855        if (Arg->getType().isRestrictQualified())
856          AI->addAttr(llvm::Attribute::NoAlias);
857
858        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
859          // This must be a promotion, for something like
860          // "void a(x) short x; {..."
861          V = EmitScalarConversion(V, Ty, Arg->getType());
862        }
863      }
864      EmitParmDecl(*Arg, V);
865      break;
866    }
867
868    case ABIArgInfo::Expand: {
869      // If this structure was expanded into multiple arguments then
870      // we need to create a temporary and reconstruct it from the
871      // arguments.
872      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
873      // FIXME: What are the right qualifiers here?
874      llvm::Function::arg_iterator End =
875        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
876      EmitParmDecl(*Arg, Temp);
877
878      // Name the arguments used in expansion and increment AI.
879      unsigned Index = 0;
880      for (; AI != End; ++AI, ++Index)
881        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
882      continue;
883    }
884
885    case ABIArgInfo::Ignore:
886      // Initialize the local variable appropriately.
887      if (hasAggregateLLVMType(Ty)) {
888        EmitParmDecl(*Arg, CreateMemTemp(Ty));
889      } else {
890        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
891      }
892
893      // Skip increment, no matching LLVM parameter.
894      continue;
895
896    case ABIArgInfo::Coerce: {
897      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
898      // result in a new alloca anyway, so we could just store into that
899      // directly if we broke the abstraction down more.
900      llvm::Value *V = CreateMemTemp(Ty, "coerce");
901
902      // If the coerce-to type is a first class aggregate, we flatten it and
903      // pass the elements. Either way is semantically identical, but fast-isel
904      // and the optimizer generally like scalar values better than FCAs.
905      if (const llvm::StructType *STy =
906            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
907        // If the argument and alloca types match up, we don't have to build the
908        // FCA at all; instead we emit a series of GEPs and stores, which is better
909        // for fast isel.
910        if (STy == cast<llvm::PointerType>(V->getType())->getElementType()) {
911          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
912            assert(AI != Fn->arg_end() && "Argument mismatch!");
913            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(V, 0, i);
914            Builder.CreateStore(AI++, EltPtr);
915          }
916        } else {
917          // Reconstruct the FCA here so we can do a coerced store.
918          llvm::Value *FormalArg = llvm::UndefValue::get(STy);
919          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
920            assert(AI != Fn->arg_end() && "Argument mismatch!");
921            FormalArg = Builder.CreateInsertValue(FormalArg, AI++, i);
922          }
923          CreateCoercedStore(FormalArg, V, /*DestIsVolatile=*/false, *this);
924        }
925      } else {
926        // Simple case, just do a coerced store of the argument into the alloca.
927        assert(AI != Fn->arg_end() && "Argument mismatch!");
928        CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
929      }
930
931
932      // Match to what EmitParmDecl is expecting for this type.
933      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
934        V = EmitLoadOfScalar(V, false, Ty);
935        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
936          // This must be a promotion, for something like
937          // "void a(x) short x; {..."
938          V = EmitScalarConversion(V, Ty, Arg->getType());
939        }
940      }
941      EmitParmDecl(*Arg, V);
942      continue;  // Skip ++AI increment, already done.
943    }
944    }
945
946    ++AI;
947  }
948  assert(AI == Fn->arg_end() && "Argument mismatch!");
949}
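
// Illustrative sketch (hypothetical types): a parameter coerced to the
// first-class aggregate { i64, i64 } arrives in the prolog above as two
// scalar LLVM arguments.  When the alloca created for the parameter happens
// to have the same struct type, they are stored element-by-element through
// GEPs; otherwise they are reassembled with insertvalue and handed to
// CreateCoercedStore.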
950
951void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
952  // Functions with no result always return void.
953  if (ReturnValue == 0) {
954    Builder.CreateRetVoid();
955    return;
956  }
957
958  llvm::Value *RV = 0;
959  QualType RetTy = FI.getReturnType();
960  const ABIArgInfo &RetAI = FI.getReturnInfo();
961
962  switch (RetAI.getKind()) {
963  case ABIArgInfo::Indirect:
964    if (RetTy->isAnyComplexType()) {
965      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
966      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
967    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
968      // Do nothing; aggregates get evaluated directly into the destination.
969    } else {
970      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
971                        false, RetTy);
972    }
973    break;
974
975  case ABIArgInfo::Extend:
976  case ABIArgInfo::Direct: {
977    // The internal return value temp will always have pointer-to-return-type
978    // type, so just do a load.
979
980    // If the instruction right before the insertion point is a store to the
981    // return value, we can elide the load, zap the store, and usually zap the
982    // alloca.
983    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
984    llvm::StoreInst *SI = 0;
985    if (InsertBB->empty() ||
986        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
987        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
988      RV = Builder.CreateLoad(ReturnValue);
989    } else {
990      // Get the stored value and nuke the now-dead store.
991      RV = SI->getValueOperand();
992      SI->eraseFromParent();
993
994      // If that was the only use of the return value, nuke it as well now.
995      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
996        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
997        ReturnValue = 0;
998      }
999    }
1000    break;
1001  }
1002  case ABIArgInfo::Ignore:
1003    break;
1004
1005  case ABIArgInfo::Coerce:
1006    RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
1007    break;
1008
1009  case ABIArgInfo::Expand:
1010    assert(0 && "Invalid ABI kind for return argument");
1011  }
1012
1013  if (RV)
1014    Builder.CreateRet(RV);
1015  else
1016    Builder.CreateRetVoid();
1017}
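
// Illustrative sketch (hypothetical IR): in the Direct case above, an
// epilogue that would otherwise be
//   store i32 %add, i32* %retval
//   %tmp = load i32* %retval
//   ret i32 %tmp
// is reduced to `ret i32 %add`: the preceding store is erased and, if that
// was its only use, the %retval alloca is deleted as well.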
1018
1019RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
1020  // StartFunction converted the ABI-lowered parameter(s) into a
1021  // local alloca.  We need to turn that into an r-value suitable
1022  // for EmitCall.
1023  llvm::Value *Local = GetAddrOfLocalVar(Param);
1024
1025  QualType ArgType = Param->getType();
1026
1027  // For the most part, we just need to load the alloca, except:
1028  // 1) aggregate r-values are actually pointers to temporaries, and
1029  // 2) references to aggregates are pointers directly to the aggregate.
1030  // I don't know why references to non-aggregates are different here.
1031  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
1032    if (hasAggregateLLVMType(RefType->getPointeeType()))
1033      return RValue::getAggregate(Local);
1034
1035    // Locals which are references to scalars are represented
1036    // with allocas holding the pointer.
1037    return RValue::get(Builder.CreateLoad(Local));
1038  }
1039
1040  if (ArgType->isAnyComplexType())
1041    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
1042
1043  if (hasAggregateLLVMType(ArgType))
1044    return RValue::getAggregate(Local);
1045
1046  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
1047}
1048
1049RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
1050  if (ArgType->isReferenceType())
1051    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
1052
1053  return EmitAnyExprToTemp(E);
1054}
1055
1056RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1057                                 llvm::Value *Callee,
1058                                 ReturnValueSlot ReturnValue,
1059                                 const CallArgList &CallArgs,
1060                                 const Decl *TargetDecl,
1061                                 llvm::Instruction **callOrInvoke) {
1062  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1063  llvm::SmallVector<llvm::Value*, 16> Args;
1064
1065  // Handle struct-return functions by passing a pointer to the
1066  // location that we would like to return into.
1067  QualType RetTy = CallInfo.getReturnType();
1068  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1069
1070
1071  // If the call returns a temporary with struct return, create a temporary
1072  // alloca to hold the result, unless one is given to us.
1073  if (CGM.ReturnTypeUsesSret(CallInfo)) {
1074    llvm::Value *Value = ReturnValue.getValue();
1075    if (!Value)
1076      Value = CreateMemTemp(RetTy);
1077    Args.push_back(Value);
1078  }
1079
1080  assert(CallInfo.arg_size() == CallArgs.size() &&
1081         "Mismatch between function signature & arguments.");
1082  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1083  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1084       I != E; ++I, ++info_it) {
1085    const ABIArgInfo &ArgInfo = info_it->info;
1086    RValue RV = I->first;
1087
1088    switch (ArgInfo.getKind()) {
1089    case ABIArgInfo::Indirect:
1090      if (RV.isScalar() || RV.isComplex()) {
1091        // Make a temporary alloca to pass the argument.
1092        Args.push_back(CreateMemTemp(I->second));
1093        if (RV.isScalar())
1094          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
1095        else
1096          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1097      } else {
1098        Args.push_back(RV.getAggregateAddr());
1099      }
1100      break;
1101
1102    case ABIArgInfo::Extend:
1103    case ABIArgInfo::Direct:
1104      if (RV.isScalar()) {
1105        Args.push_back(RV.getScalarVal());
1106      } else if (RV.isComplex()) {
1107        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
1108        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
1109        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
1110        Args.push_back(Tmp);
1111      } else {
1112        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
1113      }
1114      break;
1115
1116    case ABIArgInfo::Ignore:
1117      break;
1118
1119    case ABIArgInfo::Coerce: {
1120      // FIXME: Avoid the conversion through memory if possible.
1121      llvm::Value *SrcPtr;
1122      if (RV.isScalar()) {
1123        SrcPtr = CreateMemTemp(I->second, "coerce");
1124        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
1125      } else if (RV.isComplex()) {
1126        SrcPtr = CreateMemTemp(I->second, "coerce");
1127        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1128      } else
1129        SrcPtr = RV.getAggregateAddr();
1130
1131      // If the coerce-to type is a first class aggregate, we flatten it and
1132      // pass the elements. Either way is semantically identical, but fast-isel
1133      // and the optimizer generally like scalar values better than FCAs.
1134      if (const llvm::StructType *STy =
1135            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
1136        // If the argument and alloca types match up, we don't have to build the
1137        // FCA at all; instead we emit a series of GEPs and loads, which is better
1138        // for fast isel.
1139        if (STy ==cast<llvm::PointerType>(SrcPtr->getType())->getElementType()){
1140          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1141            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
1142            Args.push_back(Builder.CreateLoad(EltPtr));
1143          }
1144        } else {
1145          // Otherwise, do a coerced load of the entire FCA and handle the pieces.
1146          llvm::Value *SrcVal =
1147            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
1148
1149          // Extract the elements of the value to pass in.
1150          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1151            Args.push_back(Builder.CreateExtractValue(SrcVal, i));
1152        }
1153      } else {
1154        // In the simple case, just pass the coerced loaded value.
1155        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1156                                         *this));
1157      }
1158
1159      break;
1160    }
1161
1162    case ABIArgInfo::Expand:
1163      ExpandTypeToArgs(I->second, RV, Args);
1164      break;
1165    }
1166  }
1167
1168  // If the callee is a bitcast of a function to a varargs pointer to function
1169  // type, check to see if we can remove the bitcast.  This handles some cases
1170  // with unprototyped functions.
1171  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
1172    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
1173      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
1174      const llvm::FunctionType *CurFT =
1175        cast<llvm::FunctionType>(CurPT->getElementType());
1176      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
1177
1178      if (CE->getOpcode() == llvm::Instruction::BitCast &&
1179          ActualFT->getReturnType() == CurFT->getReturnType() &&
1180          ActualFT->getNumParams() == CurFT->getNumParams() &&
1181          ActualFT->getNumParams() == Args.size()) {
1182        bool ArgsMatch = true;
1183        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
1184          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
1185            ArgsMatch = false;
1186            break;
1187          }
1188
1189        // Strip the cast if we can get away with it.  This is a nice cleanup,
1190        // but also allows us to inline the function at -O0 if it is marked
1191        // always_inline.
1192        if (ArgsMatch)
1193          Callee = CalleeF;
1194      }
1195    }
1196
1197
1198  llvm::BasicBlock *InvokeDest = getInvokeDest();
1199  unsigned CallingConv;
1200  CodeGen::AttributeListType AttributeList;
1201  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
1202  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
1203                                                   AttributeList.end());
1204
1205  llvm::CallSite CS;
1206  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
1207    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
1208  } else {
1209    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1210    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
1211                              Args.data(), Args.data()+Args.size());
1212    EmitBlock(Cont);
1213  }
1214  if (callOrInvoke) {
1215    *callOrInvoke = CS.getInstruction();
1216  }
1217
1218  CS.setAttributes(Attrs);
1219  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1220
1221  // If the call doesn't return, finish the basic block and clear the
1222  // insertion point; this allows the rest of IRgen to discard
1223  // unreachable code.
1224  if (CS.doesNotReturn()) {
1225    Builder.CreateUnreachable();
1226    Builder.ClearInsertionPoint();
1227
1228    // FIXME: For now, emit a dummy basic block because expr emitters in
1229    // general are not ready to handle emitting expressions at unreachable
1230    // points.
1231    EnsureInsertPoint();
1232
1233    // Return a reasonable RValue.
1234    return GetUndefRValue(RetTy);
1235  }
1236
1237  llvm::Instruction *CI = CS.getInstruction();
1238  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
1239    CI->setName("call");
1240
1241  switch (RetAI.getKind()) {
1242  case ABIArgInfo::Indirect:
1243    if (RetTy->isAnyComplexType())
1244      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1245    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1246      return RValue::getAggregate(Args[0]);
1247    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
1248
1249  case ABIArgInfo::Extend:
1250  case ABIArgInfo::Direct:
1251    if (RetTy->isAnyComplexType()) {
1252      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
1253      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
1254      return RValue::getComplex(std::make_pair(Real, Imag));
1255    }
1256    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1257      llvm::Value *DestPtr = ReturnValue.getValue();
1258      bool DestIsVolatile = ReturnValue.isVolatile();
1259
1260      if (!DestPtr) {
1261        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
1262        DestIsVolatile = false;
1263      }
1264      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
1265      return RValue::getAggregate(DestPtr);
1266    }
1267    return RValue::get(CI);
1268
1269  case ABIArgInfo::Ignore:
1270    // The ABI says to ignore the return value, but we still need to
1271    // construct an appropriate return value for our caller.
1272    return GetUndefRValue(RetTy);
1273
1274  case ABIArgInfo::Coerce: {
1275    llvm::Value *DestPtr = ReturnValue.getValue();
1276    bool DestIsVolatile = ReturnValue.isVolatile();
1277
1278    if (!DestPtr) {
1279      DestPtr = CreateMemTemp(RetTy, "coerce");
1280      DestIsVolatile = false;
1281    }
1282
1283    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
1284    if (RetTy->isAnyComplexType())
1285      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
1286    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1287      return RValue::getAggregate(DestPtr);
1288    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
1289  }
1290
1291  case ABIArgInfo::Expand:
1292    assert(0 && "Invalid ABI kind for return argument");
1293  }
1294
1295  assert(0 && "Unhandled ABIArgInfo::Kind");
1296  return RValue::get(0);
1297}
1298
1299/* VarArg handling */
1300
1301llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1302  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1303}
1304