CGCall.cpp revision dd851595cb9123558c9029efdadb1b4be9881a3d
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
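
// For illustration (a sketch, not exercised here): given
//   struct S { void f() const; };
// GetThisType(Context, S's decl) yields the canonical type 'S *'; the
// 'const' on the method is deliberately ignored, and address-space
// qualification is likewise not yet considered (see the FIXME above).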

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}
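
// Note, as a sketch of the consequence: RequiredArgs(0) treats *every*
// argument as optional, so a use of an unprototyped type such as
//   int f();            /* K&R-style declaration */
// is laid out as a fully variadic signature when it appears as a
// function *type* (contrast with arrangeFunctionDeclaration below).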

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}
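
// Illustration (target-dependent; a sketch only): under a Microsoft-style
// C++ ABI on x86, getDefaultCXXMethodCallConv typically yields
// CC_X86ThisCall for a non-variadic instance method, while a variadic
// method keeps a cdecl-like convention so 'this' stays findable on the
// stack.  On Itanium-style ABIs the default is ordinarily just CC_C.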

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  return CC_C;
}
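
// Example (sketch): a declaration such as
//   void f(int) __attribute__((stdcall));
// carries a StdCallAttr, so this helper returns CC_X86StdCall, which
// ClangCallConvToLLVMCallConv above then maps to
// llvm::CallingConv::X86_StdCall.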

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}
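
// Resulting layout (a sketch; the implicit parameters are ABI-specific):
// for a base-object constructor of a class with virtual bases under the
// Itanium C++ ABI, the arranged signature is roughly
//   void Ctor(T *this, <implicit params, e.g. the VTT>, <formal params>)
// with 'this' first, anything BuildConstructorSignature added next, and
// the declared parameters last.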

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}
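
// Note the asymmetry with the no-proto case above (a sketch): for the
// *declaration*
//   int f();
// the emitted function gets the non-variadic type 'i32 ()', whereas a
// *call* through the same unprototyped type may still be arranged as
// variadic on some targets (see arrangeFreeFunctionCall below).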

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}
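
// Shape of the arranged type (sketch): a method such as
//   - (void)setObject:(id)obj forKey:(id)key;
// is arranged roughly as 'void(id self, SEL _cmd, id obj, id key)'; the
// receiver and selector always lead, mirroring the objc_msgSend ABI.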

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFreeFunctionCall(fnType->getResultType(), args,
                                 fnType->getExtInfo(), required);
}
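
// Worked example (sketch): for the call
//   printf("%d %s\n", n, s);
// the prototype 'int printf(const char *, ...)' is variadic with one
// declared parameter, so required becomes RequiredArgs(1): only the
// format string is required; n and s are passed under variadic rules.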

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
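
// Layout of the co-allocated buffer above (a sketch):
//   [ CGFunctionInfo | ArgInfo 0 (result) | ArgInfo 1..N (arguments) ]
// getArgsBuffer() points just past the CGFunctionInfo header, so slot 0
// holds the return type and slots 1..N hold the N argument types.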

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
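
// Example of the recursive expansion (a sketch): for
//   struct P { int xy[2]; _Complex float c; };
// this produces the flat list {i32, i32, float, float}: one i32 per array
// element, then the real and imaginary parts of the complex member.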

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are going to access some number of bytes, try to gep into the struct to
/// get at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
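
// Dive example (sketch): with SrcSTy = { { i64, i8 }, i32 } and
// DstSize = 8, we GEP into the outer struct and then into the inner one,
// returning a pointer to the leading i64 ("coerce.dive"), since that
// element alone is big enough to cover the 8 bytes being accessed.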

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
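
// Coercion paths (sketch; assumes a target where IntPtrTy is i64):
//   i8* -> i32 : ptrtoint to i64 ("coerce.val.pi"), trunc to i32
//                ("coerce.val.ii")
//   i32 -> i8* : zext to i64 ("coerce.val.ii"), then inttoptr
//                ("coerce.val.ip")
//   T*  -> U*  : a single bitcast ("coerce.val"), no integer round-trip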

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}
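
// Worked example (a sketch, independent of target): loading a value of
// coerced type i64 from memory typed { i32, i32 } takes the
// SrcSize >= DstSize path: the { i32, i32 }* is simply bitcast to i64*
// and loaded, since both types occupy 8 bytes.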

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
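
// Mirror-image example (sketch): storing a coerced i64 into memory typed
// { i32, i32 } takes the SrcSize <= DstSize branch: the destination
// pointer is bitcast to i64* and the value is stored through it (via
// BuildAggStore, which here degenerates to a single scalar store).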

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}
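
// Context (a sketch): this matters for Objective-C message sends.  On
// i386 Darwin, for example, methods returning float, double, or long
// double are dispatched through objc_msgSend_fpret, which returns on the
// x87 stack; the target hook above reports which real types need that.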

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
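
// End-to-end sketch (hypothetical CGFunctionInfo): for 'S f(P)' where S
// is returned indirectly (sret) and P expands to {i32, float}, the type
// built here is
//   void (S*, i32, float)
// with the sret pointer prepended and the expanded pieces flattened.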

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attributes::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attributes::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attributes::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attributes::InReg);
    PAL.push_back(llvm::
                  AttributeWithIndex::get(Index,
                                         llvm::Attributes::get(getLLVMContext(),
                                                               SRETAttrs)));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
      .removeAttribute(llvm::Attributes::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
                                         llvm::Attributes::get(getLLVMContext(),
                                                               RetAttrs)));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg()) {
        llvm::AttrBuilder PadAttrs;
        PadAttrs.addAttribute(llvm::Attributes::InReg);

        llvm::Attributes A = llvm::Attributes::get(getLLVMContext(), PadAttrs);
        PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
      }
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
                                         llvm::Attributes::get(getLLVMContext(),
                                                               Attrs)));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attributes::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
        .removeAttribute(llvm::Attributes::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                         llvm::Attributes::get(getLLVMContext(),
                                                               Attrs)));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
                                         llvm::Attributes::get(getLLVMContext(),
                                                               FuncAttrs)));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
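
// Example (sketch): in the K&R-style definition
//   void f(x) float x; { ... }
// the caller promotes x to double, so the prolog receives a double and
// this helper emits an fptrunc back to float ("arg.unpromote"); integer
// promotions (e.g. short -> int) take the CreateTrunc path instead.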

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                      llvm::Attributes::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                            llvm::Attributes::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
1356
1357/// Try to emit a fused autorelease of a return result.
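///
/// Roughly (illustrative IR): instead of emitting
///   %retained = call i8* @objc_retain(i8* %x)
/// and then autoreleasing %retained on the return path, we kill the retain
/// and emit a single combined call:
///   %ret = call i8* @objc_retainAutoreleaseReturnValue(i8* %x)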
1358static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1359                                                    llvm::Value *result) {
1360  // The result must be the last instruction in the current block.
1361  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1362  if (BB->empty()) return 0;
1363  if (&BB->back() != result) return 0;
1364
1365  llvm::Type *resultType = result->getType();
1366
1367  // result is in a BasicBlock and is therefore an Instruction.
1368  llvm::Instruction *generator = cast<llvm::Instruction>(result);
1369
1370  SmallVector<llvm::Instruction*,4> insnsToKill;
1371
1372  // Look for:
1373  //  %generator = bitcast %type1* %generator2 to %type2*
1374  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1375    // We would have emitted this as a constant if the operand weren't
1376    // an Instruction.
1377    generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1378
1379    // Require the generator to be immediately followed by the cast.
1380    if (generator->getNextNode() != bitcast)
1381      return 0;
1382
1383    insnsToKill.push_back(bitcast);
1384  }
1385
1386  // Look for:
1387  //   %generator = call i8* @objc_retain(i8* %originalResult)
1388  // or
1389  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1390  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1391  if (!call) return 0;
1392
1393  bool doRetainAutorelease;
1394
1395  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1396    doRetainAutorelease = true;
1397  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1398                                          .objc_retainAutoreleasedReturnValue) {
1399    doRetainAutorelease = false;
1400
1401    // If we emitted an assembly marker for this call (and the
1402    // ARCEntrypoints field should have been set if so), go looking
1403    // for that call.  If we can't find it, we can't do this
1404    // optimization.  But it should always be the immediately previous
1405    // instruction, unless we needed bitcasts around the call.
1406    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1407      llvm::Instruction *prev = call->getPrevNode();
1408      assert(prev);
1409      if (isa<llvm::BitCastInst>(prev)) {
1410        prev = prev->getPrevNode();
1411        assert(prev);
1412      }
1413      assert(isa<llvm::CallInst>(prev));
1414      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1415               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1416      insnsToKill.push_back(prev);
1417    }
1418  } else {
1419    return 0;
1420  }
1421
1422  result = call->getArgOperand(0);
1423  insnsToKill.push_back(call);
1424
1425  // Keep killing bitcasts, for sanity.  Note that we no longer care
1426  // about precise ordering as long as there's exactly one use.
1427  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1428    if (!bitcast->hasOneUse()) break;
1429    insnsToKill.push_back(bitcast);
1430    result = bitcast->getOperand(0);
1431  }
1432
1433  // Delete all the unnecessary instructions, from latest to earliest.
1434  for (SmallVectorImpl<llvm::Instruction*>::iterator
1435         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1436    (*i)->eraseFromParent();
1437
1438  // Do the fused retain/autorelease if we were asked to.
1439  if (doRetainAutorelease)
1440    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1441
1442  // Cast back to the result type.
1443  return CGF.Builder.CreateBitCast(result, resultType);
1444}
1445
1446/// If this is a +1 of the value of an immutable 'self', remove it.
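///
/// This typically arises from code like (illustrative)
///   - (id)foo { return self; }
/// where the retain of the immutable 'self' and the autorelease at the
/// return would otherwise cancel out.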
1447static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1448                                          llvm::Value *result) {
1449  // This is only applicable to a method with an immutable 'self'.
1450  const ObjCMethodDecl *method =
1451    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1452  if (!method) return 0;
1453  const VarDecl *self = method->getSelfDecl();
1454  if (!self->getType().isConstQualified()) return 0;
1455
1456  // Look for a retain call.
1457  llvm::CallInst *retainCall =
1458    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1459  if (!retainCall ||
1460      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1461    return 0;
1462
1463  // Look for an ordinary load of 'self'.
1464  llvm::Value *retainedValue = retainCall->getArgOperand(0);
1465  llvm::LoadInst *load =
1466    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1467  if (!load || load->isAtomic() || load->isVolatile() ||
1468      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1469    return 0;
1470
1471  // Okay!  Burn it all down.  This relies for correctness on the
1472  // assumption that the retain is emitted as part of the return and
1473  // that thereafter everything is used "linearly".
1474  llvm::Type *resultType = result->getType();
1475  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1476  assert(retainCall->use_empty());
1477  retainCall->eraseFromParent();
1478  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1479
1480  return CGF.Builder.CreateBitCast(load, resultType);
1481}
1482
1483/// Emit an ARC autorelease of the result of a function.
1484///
1485/// \return the value to actually return from the function
1486static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1487                                            llvm::Value *result) {
1488  // If we're returning 'self', kill the initial retain.  This is a
1489  // heuristic attempt to "encourage correctness" in the really unfortunate
1490  // case where we have a return of self during a dealloc and we desperately
1491  // need to avoid the possible autorelease.
1492  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1493    return self;
1494
1495  // At -O0, try to emit a fused retain/autorelease.
1496  if (CGF.shouldUseFusedARCCalls())
1497    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1498      return fused;
1499
1500  return CGF.EmitARCAutoreleaseReturnValue(result);
1501}
1502
1503/// Heuristically search for a dominating store to the return-value slot.
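///
/// In the common case this matches IR of the form (illustrative):
///   store i32 %result, i32* %retval
///   br label %return     ; a single-predecessor chain down to the IP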
1504static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1505  // If there are multiple uses of the return-value slot, just check
1506  // for something immediately preceding the IP.  Sometimes this can
1507  // happen with how we generate implicit-returns; it can also happen
1508  // with noreturn cleanups.
1509  if (!CGF.ReturnValue->hasOneUse()) {
1510    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1511    if (IP->empty()) return 0;
1512    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1513    if (!store) return 0;
1514    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1515    assert(!store->isAtomic() && !store->isVolatile()); // see below
1516    return store;
1517  }
1518
1519  llvm::StoreInst *store =
1520    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1521  if (!store) return 0;
1522
1523  // These aren't actually possible for non-coerced returns, and we
1524  // only care about non-coerced returns on this code path.
1525  assert(!store->isAtomic() && !store->isVolatile());
1526
1527  // Now do a quick-and-dirty dominance check: just walk up the
1528  // single-predecessors chain from the current insertion point.
1529  llvm::BasicBlock *StoreBB = store->getParent();
1530  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1531  while (IP != StoreBB) {
1532    if (!(IP = IP->getSinglePredecessor()))
1533      return 0;
1534  }
1535
1536  // Okay, the store's basic block dominates the insertion point; we
1537  // can do our thing.
1538  return store;
1539}
1540
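/// Emit the standard function epilog: reload the return value (if any)
/// from the return slot, applying the ABI's return coercion, and emit the
/// ret instruction.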
1541void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1542  // Functions with no result always return void.
1543  if (ReturnValue == 0) {
1544    Builder.CreateRetVoid();
1545    return;
1546  }
1547
1548  llvm::DebugLoc RetDbgLoc;
1549  llvm::Value *RV = 0;
1550  QualType RetTy = FI.getReturnType();
1551  const ABIArgInfo &RetAI = FI.getReturnInfo();
1552
1553  switch (RetAI.getKind()) {
1554  case ABIArgInfo::Indirect: {
1555    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1556    if (RetTy->isAnyComplexType()) {
1557      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1558      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1559    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1560      // Do nothing; aggregates get evaluated directly into the destination.
1561    } else {
1562      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1563                        false, Alignment, RetTy);
1564    }
1565    break;
1566  }
1567
1568  case ABIArgInfo::Extend:
1569  case ABIArgInfo::Direct:
1570    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1571        RetAI.getDirectOffset() == 0) {
1572      // The internal return value temp will always have
1573      // pointer-to-return-type type, so just do a load.
1574
1575      // If there is a dominating store to ReturnValue, we can elide
1576      // the load, zap the store, and usually zap the alloca.
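      //
      // Illustratively, this turns
      //   store i32 %x, i32* %retval
      //   %r = load i32* %retval
      //   ret i32 %r
      // into a plain 'ret i32 %x', usually with no return slot at all.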
1577      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1578        // Get the stored value and nuke the now-dead store.
1579        RetDbgLoc = SI->getDebugLoc();
1580        RV = SI->getValueOperand();
1581        SI->eraseFromParent();
1582
1583        // If that was the only use of the return value, nuke it as well now.
1584        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1585          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1586          ReturnValue = 0;
1587        }
1588
1589      // Otherwise, we have to do a simple load.
1590      } else {
1591        RV = Builder.CreateLoad(ReturnValue);
1592      }
1593    } else {
1594      llvm::Value *V = ReturnValue;
1595      // If the value is offset in memory, apply the offset now.
1596      if (unsigned Offs = RetAI.getDirectOffset()) {
1597        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1598        V = Builder.CreateConstGEP1_32(V, Offs);
1599        V = Builder.CreateBitCast(V,
1600                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1601      }
1602
1603      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1604    }
1605
1606    // In ARC, end functions that return a retainable type with a call
1607    // to objc_autoreleaseReturnValue.
1608    if (AutoreleaseResult) {
1609      assert(getLangOpts().ObjCAutoRefCount &&
1610             !FI.isReturnsRetained() &&
1611             RetTy->isObjCRetainableType());
1612      RV = emitAutoreleaseOfResult(*this, RV);
1613    }
1614
1615    break;
1616
1617  case ABIArgInfo::Ignore:
1618    break;
1619
1620  case ABIArgInfo::Expand:
1621    llvm_unreachable("Invalid ABI kind for return argument");
1622  }
1623
1624  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1625  if (!RetDbgLoc.isUnknown())
1626    Ret->setDebugLoc(RetDbgLoc);
1627}
1628
1629void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1630                                          const VarDecl *param) {
1631  // StartFunction converted the ABI-lowered parameter(s) into a
1632  // local alloca.  We need to turn that into an r-value suitable
1633  // for EmitCall.
1634  llvm::Value *local = GetAddrOfLocalVar(param);
1635
1636  QualType type = param->getType();
1637
1638  // For the most part, we just need to load the alloca, except:
1639  // 1) aggregate r-values are actually pointers to temporaries, and
1640  // 2) references to aggregates are pointers directly to the aggregate.
1641  // I don't know why references to non-aggregates are different here.
1642  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1643    if (hasAggregateLLVMType(ref->getPointeeType()))
1644      return args.add(RValue::getAggregate(local), type);
1645
1646    // Locals which are references to scalars are represented
1647    // with allocas holding the pointer.
1648    return args.add(RValue::get(Builder.CreateLoad(local)), type);
1649  }
1650
1651  if (type->isAnyComplexType()) {
1652    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1653    return args.add(RValue::getComplex(complex), type);
1654  }
1655
1656  if (hasAggregateLLVMType(type))
1657    return args.add(RValue::getAggregate(local), type);
1658
1659  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1660  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1661  return args.add(RValue::get(value), type);
1662}
1663
1664static bool isProvablyNull(llvm::Value *addr) {
1665  return isa<llvm::ConstantPointerNull>(addr);
1666}
1667
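/// A conservative test for non-nullness: only locally-allocated memory
/// (an alloca) is considered certainly non-null; everything else is
/// treated as possibly null.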
1668static bool isProvablyNonNull(llvm::Value *addr) {
1669  return isa<llvm::AllocaInst>(addr);
1670}
1671
1672/// Emit the actual writing-back of a writeback.
1673static void emitWriteback(CodeGenFunction &CGF,
1674                          const CallArgList::Writeback &writeback) {
1675  llvm::Value *srcAddr = writeback.Address;
1676  assert(!isProvablyNull(srcAddr) &&
1677         "shouldn't have writeback for provably null argument");
1678
1679  llvm::BasicBlock *contBB = 0;
1680
1681  // If the argument wasn't provably non-null, we need to null check
1682  // before doing the store.
1683  bool provablyNonNull = isProvablyNonNull(srcAddr);
1684  if (!provablyNonNull) {
1685    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1686    contBB = CGF.createBasicBlock("icr.done");
1687
1688    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1689    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1690    CGF.EmitBlock(writebackBB);
1691  }
1692
1693  // Load the value to writeback.
1694  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1695
1696  // Cast it back, in case we're writing an id to a Foo* or something.
1697  value = CGF.Builder.CreateBitCast(value,
1698               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1699                            "icr.writeback-cast");
1700
1701  // Perform the writeback.
1702  QualType srcAddrType = writeback.AddressType;
1703  CGF.EmitStoreThroughLValue(RValue::get(value),
1704                             CGF.MakeAddrLValue(srcAddr, srcAddrType));
1705
1706  // Jump to the continuation block.
1707  if (!provablyNonNull)
1708    CGF.EmitBlock(contBB);
1709}
1710
1711static void emitWritebacks(CodeGenFunction &CGF,
1712                           const CallArgList &args) {
1713  for (CallArgList::writeback_iterator
1714         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1715    emitWriteback(CGF, *i);
1716}
1717
1718/// Emit an argument that's being passed call-by-writeback.  That is, we are
1719/// passing the address of a temporary that is written back after the call.
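///
/// A typical source-level trigger under ARC (illustrative):
///   void getError(NSError **outError);
///   NSError *err = nil;
///   getError(&err);   // pass &temp; copy temp back into 'err' afterwards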
1720static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1721                             const ObjCIndirectCopyRestoreExpr *CRE) {
1722  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1723
1724  // The dest and src types don't necessarily match in LLVM terms
1725  // because of the crazy ObjC compatibility rules.
1726
1727  llvm::PointerType *destType =
1728    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1729
1730  // If the address is a constant null, just pass the appropriate null.
1731  if (isProvablyNull(srcAddr)) {
1732    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1733             CRE->getType());
1734    return;
1735  }
1736
1737  QualType srcAddrType =
1738    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1739
1740  // Create the temporary.
1741  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1742                                           "icr.temp");
1743
1744  // Zero-initialize it if we're not doing a copy-initialization.
1745  bool shouldCopy = CRE->shouldCopy();
1746  if (!shouldCopy) {
1747    llvm::Value *null =
1748      llvm::ConstantPointerNull::get(
1749        cast<llvm::PointerType>(destType->getElementType()));
1750    CGF.Builder.CreateStore(null, temp);
1751  }
1752
1753  llvm::BasicBlock *contBB = 0;
1754
1755  // If the address is *not* known to be non-null, we need a run-time check.
1756  llvm::Value *finalArgument;
1757
1758  bool provablyNonNull = isProvablyNonNull(srcAddr);
1759  if (provablyNonNull) {
1760    finalArgument = temp;
1761  } else {
1762    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1763
1764    finalArgument = CGF.Builder.CreateSelect(isNull,
1765                                   llvm::ConstantPointerNull::get(destType),
1766                                             temp, "icr.argument");
1767
1768    // If we need to copy, then the load has to be conditional, which
1769    // means we need control flow.
1770    if (shouldCopy) {
1771      contBB = CGF.createBasicBlock("icr.cont");
1772      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1773      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1774      CGF.EmitBlock(copyBB);
1775    }
1776  }
1777
1778  // Perform a copy if necessary.
1779  if (shouldCopy) {
1780    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1781    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1782    assert(srcRV.isScalar());
1783
1784    llvm::Value *src = srcRV.getScalarVal();
1785    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1786                                    "icr.cast");
1787
1788    // Use an ordinary store, not a store-to-lvalue.
1789    CGF.Builder.CreateStore(src, temp);
1790  }
1791
1792  // Finish the control flow if we needed it.
1793  if (shouldCopy && !provablyNonNull)
1794    CGF.EmitBlock(contBB);
1795
1796  args.addWriteback(srcAddr, srcAddrType, temp);
1797  args.add(RValue::get(finalArgument), CRE->getType());
1798}
1799
1800void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1801                                  QualType type) {
1802  if (const ObjCIndirectCopyRestoreExpr *CRE
1803        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1804    assert(getLangOpts().ObjCAutoRefCount);
1805    assert(getContext().hasSameType(E->getType(), type));
1806    return emitWritebackArg(*this, args, CRE);
1807  }
1808
1809  assert(type->isReferenceType() == E->isGLValue() &&
1810         "reference binding to unmaterialized r-value!");
1811
1812  if (E->isGLValue()) {
1813    assert(E->getObjectKind() == OK_Ordinary);
1814    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1815                    type);
1816  }
1817
1818  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1819      isa<ImplicitCastExpr>(E) &&
1820      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1821    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1822    assert(L.isSimple());
1823    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1824    return;
1825  }
1826
1827  args.add(EmitAnyExprToTemp(E), type);
1828}
1829
1830// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1831// optimizer it can aggressively ignore unwind edges.
1832void
1833CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1834  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1835      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1836    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1837                      CGM.getNoObjCARCExceptionsMetadata());
1838}
1839
1840/// Emits a call or invoke instruction to the given function, depending
1841/// on the current state of the EH stack.
1842llvm::CallSite
1843CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1844                                  ArrayRef<llvm::Value *> Args,
1845                                  const Twine &Name) {
1846  llvm::BasicBlock *InvokeDest = getInvokeDest();
1847
1848  llvm::Instruction *Inst;
1849  if (!InvokeDest)
1850    Inst = Builder.CreateCall(Callee, Args, Name);
1851  else {
1852    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1853    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1854    EmitBlock(ContBB);
1855  }
1856
1857  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1858  // optimizer it can aggressively ignore unwind edges.
1859  if (CGM.getLangOpts().ObjCAutoRefCount)
1860    AddObjCARCExceptionMetadata(Inst);
1861
1862  return Inst;
1863}
1864
1865llvm::CallSite
1866CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1867                                  const Twine &Name) {
1868  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1869}
1870
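/// Assertion helper: check that the value we are about to pass matches the
/// IR function type's parameter, unless we have already reached the varargs
/// portion of the signature.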
1871static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1872                            llvm::FunctionType *FTy) {
1873  if (ArgNo < FTy->getNumParams())
1874    assert(Elt->getType() == FTy->getParamType(ArgNo));
1875  else
1876    assert(FTy->isVarArg());
1877  ++ArgNo;
1878}
1879
1880void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1881                                       SmallVector<llvm::Value*,16> &Args,
1882                                       llvm::FunctionType *IRFuncTy) {
1883  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1884    unsigned NumElts = AT->getSize().getZExtValue();
1885    QualType EltTy = AT->getElementType();
1886    llvm::Value *Addr = RV.getAggregateAddr();
1887    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1888      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1889      LValue LV = MakeAddrLValue(EltAddr, EltTy);
1890      RValue EltRV;
1891      if (EltTy->isAnyComplexType())
1892        // FIXME: Volatile?
1893        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1894      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1895        EltRV = LV.asAggregateRValue();
1896      else
1897        EltRV = EmitLoadOfLValue(LV);
1898      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1899    }
1900  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1901    RecordDecl *RD = RT->getDecl();
1902    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1903    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1904
1905    if (RD->isUnion()) {
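      // A union is expanded as its largest member only.  Illustratively,
      //   union U { int i; double d; };
      // is passed as if it were just a double.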
1906      const FieldDecl *LargestFD = 0;
1907      CharUnits UnionSize = CharUnits::Zero();
1908
1909      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1910           i != e; ++i) {
1911        const FieldDecl *FD = *i;
1912        assert(!FD->isBitField() &&
1913               "Cannot expand structure with bit-field members.");
1914        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1915        if (UnionSize < FieldSize) {
1916          UnionSize = FieldSize;
1917          LargestFD = FD;
1918        }
1919      }
1920      if (LargestFD) {
1921        RValue FldRV = EmitRValueForField(LV, LargestFD);
1922        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1923      }
1924    } else {
1925      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1926           i != e; ++i) {
1927        FieldDecl *FD = *i;
1928
1929        RValue FldRV = EmitRValueForField(LV, FD);
1930        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1931      }
1932    }
1933  } else if (Ty->isAnyComplexType()) {
1934    ComplexPairTy CV = RV.getComplexVal();
1935    Args.push_back(CV.first);
1936    Args.push_back(CV.second);
1937  } else {
1938    assert(RV.isScalar() &&
1939           "Unexpected non-scalar rvalue during struct expansion.");
1940
1941    // Insert a bitcast as needed.
1942    llvm::Value *V = RV.getScalarVal();
1943    if (Args.size() < IRFuncTy->getNumParams() &&
1944        V->getType() != IRFuncTy->getParamType(Args.size()))
1945      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1946
1947    Args.push_back(V);
1948  }
1949}
1950
1951
1952RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1953                                 llvm::Value *Callee,
1954                                 ReturnValueSlot ReturnValue,
1955                                 const CallArgList &CallArgs,
1956                                 const Decl *TargetDecl,
1957                                 llvm::Instruction **callOrInvoke) {
1958  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1959  SmallVector<llvm::Value*, 16> Args;
1960
1961  // Handle struct-return functions by passing a pointer to the
1962  // location that we would like to return into.
1963  QualType RetTy = CallInfo.getReturnType();
1964  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1965
1966  // IRArgNo - Keep track of the argument number in the callee we're looking at.
1967  unsigned IRArgNo = 0;
1968  llvm::FunctionType *IRFuncTy =
1969    cast<llvm::FunctionType>(
1970                  cast<llvm::PointerType>(Callee->getType())->getElementType());
1971
1972  // If the call returns a temporary with struct return, create a temporary
1973  // alloca to hold the result, unless one is given to us.
1974  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1975    llvm::Value *Value = ReturnValue.getValue();
1976    if (!Value)
1977      Value = CreateMemTemp(RetTy);
1978    Args.push_back(Value);
1979    checkArgMatches(Value, IRArgNo, IRFuncTy);
1980  }
1981
1982  assert(CallInfo.arg_size() == CallArgs.size() &&
1983         "Mismatch between function signature & arguments.");
1984  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1985  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1986       I != E; ++I, ++info_it) {
1987    const ABIArgInfo &ArgInfo = info_it->info;
1988    RValue RV = I->RV;
1989
1990    unsigned TypeAlign =
1991      getContext().getTypeAlignInChars(I->Ty).getQuantity();
1992
1993    // Insert a padding argument to ensure proper alignment.
1994    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
1995      Args.push_back(llvm::UndefValue::get(PaddingType));
1996      ++IRArgNo;
1997    }
1998
1999    switch (ArgInfo.getKind()) {
2000    case ABIArgInfo::Indirect: {
2001      if (RV.isScalar() || RV.isComplex()) {
2002        // Make a temporary alloca to pass the argument.
2003        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2004        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2005          AI->setAlignment(ArgInfo.getIndirectAlign());
2006        Args.push_back(AI);
2007
2008        if (RV.isScalar())
2009          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
2010                            TypeAlign, I->Ty);
2011        else
2012          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
2013
2014        // Validate argument match.
2015        checkArgMatches(AI, IRArgNo, IRFuncTy);
2016      } else {
2017        // We want to avoid creating an unnecessary temporary+copy here;
2018        // however, we need one in two cases:
2019        // 1. If the argument is not byval, and we are required to copy the
2020        //    source.  (This case doesn't occur on any common architecture.)
2021        // 2. If the argument is byval, RV is not sufficiently aligned, and
2022        //    we cannot force it to be sufficiently aligned.
2023        llvm::Value *Addr = RV.getAggregateAddr();
2024        unsigned Align = ArgInfo.getIndirectAlign();
2025        const llvm::DataLayout *TD = &CGM.getDataLayout();
2026        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2027            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
2028             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
2029          // Create an aligned temporary, and copy to it.
2030          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2031          if (Align > AI->getAlignment())
2032            AI->setAlignment(Align);
2033          Args.push_back(AI);
2034          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2035
2036          // Validate argument match.
2037          checkArgMatches(AI, IRArgNo, IRFuncTy);
2038        } else {
2039          // Skip the extra memcpy call.
2040          Args.push_back(Addr);
2041
2042          // Validate argument match.
2043          checkArgMatches(Addr, IRArgNo, IRFuncTy);
2044        }
2045      }
2046      break;
2047    }
2048
2049    case ABIArgInfo::Ignore:
2050      break;
2051
2052    case ABIArgInfo::Extend:
2053    case ABIArgInfo::Direct: {
2054      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2055          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2056          ArgInfo.getDirectOffset() == 0) {
2057        llvm::Value *V;
2058        if (RV.isScalar())
2059          V = RV.getScalarVal();
2060        else
2061          V = Builder.CreateLoad(RV.getAggregateAddr());
2062
2063        // If the argument doesn't match, perform a bitcast to coerce it.  This
2064        // can happen due to trivial type mismatches.
2065        if (IRArgNo < IRFuncTy->getNumParams() &&
2066            V->getType() != IRFuncTy->getParamType(IRArgNo))
2067          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2068        Args.push_back(V);
2069
2070        checkArgMatches(V, IRArgNo, IRFuncTy);
2071        break;
2072      }
2073
2074      // FIXME: Avoid the conversion through memory if possible.
2075      llvm::Value *SrcPtr;
2076      if (RV.isScalar()) {
2077        SrcPtr = CreateMemTemp(I->Ty, "coerce");
2078        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
2079      } else if (RV.isComplex()) {
2080        SrcPtr = CreateMemTemp(I->Ty, "coerce");
2081        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
2082      } else
2083        SrcPtr = RV.getAggregateAddr();
2084
2085      // If the value is offset in memory, apply the offset now.
2086      if (unsigned Offs = ArgInfo.getDirectOffset()) {
2087        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2088        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2089        SrcPtr = Builder.CreateBitCast(SrcPtr,
2090                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2091
2092      }
2093
2094      // If the coerce-to type is a first class aggregate, we flatten it and
2095      // pass the elements. Either way is semantically identical, but fast-isel
2096      // and the optimizer generally like scalar values better than FCAs.
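      //
      // Illustratively, for a coerce-to type of { i64, i64 } we load each
      // element from the (suitably bitcast) source and pass it as its own
      // scalar argument.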
2097      if (llvm::StructType *STy =
2098            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2099        llvm::Type *SrcTy =
2100          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2101        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2102        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2103
2104        // If the source type is smaller than the destination type of the
2105        // coerce-to logic, copy the source value into a temp alloca the size
2106        // of the destination type to allow loading all of it. The bits past
2107        // the source value are left undef.
2108        if (SrcSize < DstSize) {
2109          llvm::AllocaInst *TempAlloca
2110            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2111          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2112          SrcPtr = TempAlloca;
2113        } else {
2114          SrcPtr = Builder.CreateBitCast(SrcPtr,
2115                                         llvm::PointerType::getUnqual(STy));
2116        }
2117
2118        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2119          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2120          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2121          // We don't know what we're loading from.
2122          LI->setAlignment(1);
2123          Args.push_back(LI);
2124
2125          // Validate argument match.
2126          checkArgMatches(LI, IRArgNo, IRFuncTy);
2127        }
2128      } else {
2129        // In the simple case, just pass the coerced loaded value.
2130        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2131                                         *this));
2132
2133        // Validate argument match.
2134        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2135      }
2136
2137      break;
2138    }
2139
2140    case ABIArgInfo::Expand:
2141      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2142      IRArgNo = Args.size();
2143      break;
2144    }
2145  }
2146
2147  // If the callee is a bitcast of a function to a pointer to a varargs
2148  // function type, check to see if we can remove the bitcast.  This handles
2149  // some cases with unprototyped functions.
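  //
  // Illustrative C source for this situation:
  //   void f();              // unprototyped
  //   void g(void) { f(1); }
  // The call is emitted through a bitcast of @f to a prototype derived from
  // the actual arguments; when everything lines up, we strip the cast and
  // call @f directly.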
2150  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2151    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2152      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2153      llvm::FunctionType *CurFT =
2154        cast<llvm::FunctionType>(CurPT->getElementType());
2155      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2156
2157      if (CE->getOpcode() == llvm::Instruction::BitCast &&
2158          ActualFT->getReturnType() == CurFT->getReturnType() &&
2159          ActualFT->getNumParams() == CurFT->getNumParams() &&
2160          ActualFT->getNumParams() == Args.size() &&
2161          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2162        bool ArgsMatch = true;
2163        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2164          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2165            ArgsMatch = false;
2166            break;
2167          }
2168
2169        // Strip the cast if we can get away with it.  This is a nice cleanup,
2170        // but also allows us to inline the function at -O0 if it is marked
2171        // always_inline.
2172        if (ArgsMatch)
2173          Callee = CalleeF;
2174      }
2175    }
2176
2177  unsigned CallingConv;
2178  CodeGen::AttributeListType AttributeList;
2179  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2180  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(getLLVMContext(),
2181                                                   AttributeList);
2182
2183  llvm::BasicBlock *InvokeDest = 0;
2184  if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
2185    InvokeDest = getInvokeDest();
2186
2187  llvm::CallSite CS;
2188  if (!InvokeDest) {
2189    CS = Builder.CreateCall(Callee, Args);
2190  } else {
2191    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2192    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2193    EmitBlock(Cont);
2194  }
2195  if (callOrInvoke)
2196    *callOrInvoke = CS.getInstruction();
2197
2198  CS.setAttributes(Attrs);
2199  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2200
2201  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2202  // optimizer it can aggressively ignore unwind edges.
2203  if (CGM.getLangOpts().ObjCAutoRefCount)
2204    AddObjCARCExceptionMetadata(CS.getInstruction());
2205
2206  // If the call doesn't return, finish the basic block and clear the
2207  // insertion point; this allows the rest of IRgen to discard
2208  // unreachable code.
2209  if (CS.doesNotReturn()) {
2210    Builder.CreateUnreachable();
2211    Builder.ClearInsertionPoint();
2212
2213    // FIXME: For now, emit a dummy basic block because expr emitters in
2214    // general are not ready to handle emitting expressions at unreachable
2215    // points.
2216    EnsureInsertPoint();
2217
2218    // Return a reasonable RValue.
2219    return GetUndefRValue(RetTy);
2220  }
2221
2222  llvm::Instruction *CI = CS.getInstruction();
2223  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2224    CI->setName("call");
2225
2226  // Emit any writebacks immediately.  Arguably this should happen
2227  // after any return-value munging.
2228  if (CallArgs.hasWritebacks())
2229    emitWritebacks(*this, CallArgs);
2230
2231  switch (RetAI.getKind()) {
2232  case ABIArgInfo::Indirect: {
2233    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2234    if (RetTy->isAnyComplexType())
2235      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2236    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2237      return RValue::getAggregate(Args[0]);
2238    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2239  }
2240
2241  case ABIArgInfo::Ignore:
2242    // Even though the ABI ignores the return value, our caller still
2243    // expects an RValue of the right type, so construct an undef one.
2244    return GetUndefRValue(RetTy);
2245
2246  case ABIArgInfo::Extend:
2247  case ABIArgInfo::Direct: {
2248    llvm::Type *RetIRTy = ConvertType(RetTy);
2249    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2250      if (RetTy->isAnyComplexType()) {
2251        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2252        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2253        return RValue::getComplex(std::make_pair(Real, Imag));
2254      }
2255      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2256        llvm::Value *DestPtr = ReturnValue.getValue();
2257        bool DestIsVolatile = ReturnValue.isVolatile();
2258
2259        if (!DestPtr) {
2260          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2261          DestIsVolatile = false;
2262        }
2263        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2264        return RValue::getAggregate(DestPtr);
2265      }
2266
2267      // If the argument doesn't match, perform a bitcast to coerce it.  This
2268      // can happen due to trivial type mismatches.
2269      llvm::Value *V = CI;
2270      if (V->getType() != RetIRTy)
2271        V = Builder.CreateBitCast(V, RetIRTy);
2272      return RValue::get(V);
2273    }
2274
2275    llvm::Value *DestPtr = ReturnValue.getValue();
2276    bool DestIsVolatile = ReturnValue.isVolatile();
2277
2278    if (!DestPtr) {
2279      DestPtr = CreateMemTemp(RetTy, "coerce");
2280      DestIsVolatile = false;
2281    }
2282
2283    // If the value is offset in memory, apply the offset now.
2284    llvm::Value *StorePtr = DestPtr;
2285    if (unsigned Offs = RetAI.getDirectOffset()) {
2286      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2287      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2288      StorePtr = Builder.CreateBitCast(StorePtr,
2289                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2290    }
2291    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2292
2293    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2294    if (RetTy->isAnyComplexType())
2295      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2296    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2297      return RValue::getAggregate(DestPtr);
2298    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2299  }
2300
2301  case ABIArgInfo::Expand:
2302    llvm_unreachable("Invalid ABI kind for return argument");
2303  }
2304
2305  llvm_unreachable("Unhandled ABIArgInfo::Kind");
2306}
2307
2308/* VarArg handling */
2309
2310llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2311  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2312}
2313