CGCall.cpp revision ef072033876e295ec5d3402f8730a3ae358ad815
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
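  // (Explanatory note: RequiredArgs(0) below marks every argument as
  // optional, so each actual argument is passed under the variadic
  // convention, matching how calls to unprototyped C functions behave.)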
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 None, FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of the 'this' argument already in the prefix.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType =
        CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, ctorKind);
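  // On targets whose C++ ABI makes constructors return 'this' (e.g. ARM),
  // use the 'this' pointer type we just pushed as the result type;
  // otherwise the constructor returns void.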
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, dtorKind);
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

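  // (A variadic method requires exactly the formal arguments collected above:
  // receiver, selector, and declared parameters.  Anything past them is
  // passed under the variadic convention.)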
  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGT.CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
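  // (CGFunctionInfo objects are uniqued in a FoldingSet keyed on the
  // extension info, the required-argument count, and the result and argument
  // types, so repeated arrangements of the same signature share one entry.)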
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/
void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
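/// For example, loading 4 bytes through a pointer to { { i32, i8 }, float }
/// descends to the inner i32, since each enclosing first element is at least
/// 4 bytes wide.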
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
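/// For example, coercing an i64 holding 0x11223344AABBCCDD to i32 yields
/// 0x11223344 (the high half) on a big-endian target but 0xAABBCCDD (the low
/// half) on a little-endian one, exactly as a store-then-narrow-load would.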
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
      1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
        1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
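  // (For a variadic function only the required arguments appear in the IR
  // function type; the optional ones are covered by the trailing '...'.)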
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally likes scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overriders.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
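  // (In an LLVM attribute list, index 0 is the return value, the function
  // itself uses a sentinel index, and IR parameters are numbered from 1;
  // Index tracks the next IR parameter slot as padding and expanded
  // arguments are accounted for.)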
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(), Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                        AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
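    // (A K&R-promoted parameter arrives as its promoted type, e.g. 'double'
    // for a declared 'float'; emitArgumentDemotion narrows it back below.)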

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

1342      // The alignment we need to use is the max of the requested alignment for
1343      // the argument and the alignment required by our access code below.
1344      unsigned AlignmentToUse =
1345        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1346      AlignmentToUse = std::max(AlignmentToUse,
1347                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1348
1349      Alloca->setAlignment(AlignmentToUse);
1350      llvm::Value *V = Alloca;
1351      llvm::Value *Ptr = V;    // Pointer to store into.
1352
1353      // If the value is offset in memory, apply the offset now.
1354      if (unsigned Offs = ArgI.getDirectOffset()) {
1355        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1356        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1357        Ptr = Builder.CreateBitCast(Ptr,
1358                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1359      }
1360
1361      // If the coerce-to type is a first class aggregate, we flatten it and
1362      // pass the elements. Either way is semantically identical, but fast-isel
1363      // and the optimizer generally like scalar values better than FCAs.
1364      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1365      if (STy && STy->getNumElements() > 1) {
1366        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1367        llvm::Type *DstTy =
1368          cast<llvm::PointerType>(Ptr->getType())->getElementType();
1369        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1370
1371        if (SrcSize <= DstSize) {
1372          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1373
1374          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1375            assert(AI != Fn->arg_end() && "Argument mismatch!");
1376            AI->setName(Arg->getName() + ".coerce" + Twine(i));
1377            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1378            Builder.CreateStore(AI++, EltPtr);
1379          }
1380        } else {
1381          llvm::AllocaInst *TempAlloca =
1382            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1383          TempAlloca->setAlignment(AlignmentToUse);
1384          llvm::Value *TempV = TempAlloca;
1385
1386          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1387            assert(AI != Fn->arg_end() && "Argument mismatch!");
1388            AI->setName(Arg->getName() + ".coerce" + Twine(i));
1389            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1390            Builder.CreateStore(AI++, EltPtr);
1391          }
1392
1393          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1394        }
1395      } else {
1396        // Simple case, just do a coerced store of the argument into the alloca.
1397        assert(AI != Fn->arg_end() && "Argument mismatch!");
1398        AI->setName(Arg->getName() + ".coerce");
1399        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1400      }
1401
1402
1403      // Match to what EmitParmDecl is expecting for this type.
1404      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1405        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1406        if (isPromoted)
1407          V = emitArgumentDemotion(*this, Arg, V);
1408      }
1409      EmitParmDecl(*Arg, V, ArgNo);
1410      continue;  // Skip ++AI increment, already done.
1411    }
1412
1413    case ABIArgInfo::Expand: {
1414      // If this structure was expanded into multiple arguments then
1415      // we need to create a temporary and reconstruct it from the
1416      // arguments.
1417      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1418      CharUnits Align = getContext().getDeclAlign(Arg);
1419      Alloca->setAlignment(Align.getQuantity());
1420      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1421      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1422      EmitParmDecl(*Arg, Alloca, ArgNo);
1423
1424      // Name the arguments used in expansion and increment AI.
1425      unsigned Index = 0;
1426      for (; AI != End; ++AI, ++Index)
1427        AI->setName(Arg->getName() + "." + Twine(Index));
1428      continue;
1429    }
1430
1431    case ABIArgInfo::Ignore:
1432      // Initialize the local variable appropriately.
1433      if (!hasScalarEvaluationKind(Ty))
1434        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1435      else
1436        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1437                     ArgNo);
1438
1439      // Skip increment, no matching LLVM parameter.
1440      continue;
1441    }
1442
1443    ++AI;
1444  }
1445  assert(AI == Fn->arg_end() && "Argument mismatch!");
1446}
1447
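// A minimal standalone sketch (plain C++, independent of the CodeGen APIs
// above) of the "coerce through memory" idea used in the Direct/Extend
// case: the ABI-coerced representation is stored to a temporary and the
// same bytes are loaded back as the semantic type.  The struct and the
// coerced integer type here are hypothetical, not taken from any real ABI.
#include <string.h>
namespace {
struct CoercePair { float x; float y; };      // hypothetical semantic type
CoercePair uncoerceSketch(uint64_t coerced) {
  CoercePair tmp;                             // plays the role of the alloca
  // Assumes sizeof(CoercePair) == sizeof(coerced); the code above handles
  // size mismatches by copying through a temporary of the larger size.
  memcpy(&tmp, &coerced, sizeof(tmp));        // the "coerced store"
  return tmp;                                 // the load as the real type
}
}
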
1448static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1449  while (insn->use_empty()) {
1450    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1451    if (!bitcast) return;
1452
1453    // This is "safe" because we would have used a ConstantExpr otherwise.
1454    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1455    bitcast->eraseFromParent();
1456  }
1457}
1458
1459/// Try to emit a fused autorelease of a return result.
1460static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1461                                                    llvm::Value *result) {
1462  // The result must be the last instruction in the current block.
1463  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1464  if (BB->empty()) return 0;
1465  if (&BB->back() != result) return 0;
1466
1467  llvm::Type *resultType = result->getType();
1468
1469  // result is in a BasicBlock and is therefore an Instruction.
1470  llvm::Instruction *generator = cast<llvm::Instruction>(result);
1471
1472  SmallVector<llvm::Instruction*,4> insnsToKill;
1473
1474  // Look for:
1475  //  %generator = bitcast %type1* %generator2 to %type2*
1476  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1477    // We would have emitted this as a constant if the operand weren't
1478    // an Instruction.
1479    generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1480
1481    // Require the generator to be immediately followed by the cast.
1482    if (generator->getNextNode() != bitcast)
1483      return 0;
1484
1485    insnsToKill.push_back(bitcast);
1486  }
1487
1488  // Look for:
1489  //   %generator = call i8* @objc_retain(i8* %originalResult)
1490  // or
1491  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1492  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1493  if (!call) return 0;
1494
1495  bool doRetainAutorelease;
1496
1497  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1498    doRetainAutorelease = true;
1499  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1500                                          .objc_retainAutoreleasedReturnValue) {
1501    doRetainAutorelease = false;
1502
1503    // If we emitted an assembly marker for this call (and the
1504    // ARCEntrypoints field should have been set if so), go looking
1505    // for that call.  If we can't find it, we can't do this
1506    // optimization.  But it should always be the immediately previous
1507    // instruction, unless we needed bitcasts around the call.
1508    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1509      llvm::Instruction *prev = call->getPrevNode();
1510      assert(prev);
1511      if (isa<llvm::BitCastInst>(prev)) {
1512        prev = prev->getPrevNode();
1513        assert(prev);
1514      }
1515      assert(isa<llvm::CallInst>(prev));
1516      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1517               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1518      insnsToKill.push_back(prev);
1519    }
1520  } else {
1521    return 0;
1522  }
1523
1524  result = call->getArgOperand(0);
1525  insnsToKill.push_back(call);
1526
1527  // Keep killing bitcasts, for sanity.  Note that we no longer care
1528  // about precise ordering as long as there's exactly one use.
1529  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1530    if (!bitcast->hasOneUse()) break;
1531    insnsToKill.push_back(bitcast);
1532    result = bitcast->getOperand(0);
1533  }
1534
1535  // Delete all the unnecessary instructions, from latest to earliest.
1536  for (SmallVectorImpl<llvm::Instruction*>::iterator
1537         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1538    (*i)->eraseFromParent();
1539
1540  // Do the fused retain/autorelease if we were asked to.
1541  if (doRetainAutorelease)
1542    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1543
1544  // Cast back to the result type.
1545  return CGF.Builder.CreateBitCast(result, resultType);
1546}
1547
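// A small self-contained sketch of the peephole shape used above: walk
// backwards through a chain of casts to the defining call, collecting the
// casts so they can be deleted afterwards.  The SketchInst model below is
// hypothetical; it illustrates the traversal, not the llvm::Instruction
// classes.
namespace {
struct SketchInst {
  enum Kind { Cast, Call, Other } kind;
  SketchInst *operand;                        // cast operand or call result
};
SketchInst *stripCastsToCall(SketchInst *v,
                             SmallVectorImpl<SketchInst *> &toKill) {
  while (v->kind == SketchInst::Cast) {       // like the bitcast loop above
    toKill.push_back(v);
    v = v->operand;
  }
  return v->kind == SketchInst::Call ? v : 0; // null if no call was found
}
}
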
1548/// If this is a +1 of the value of an immutable 'self', remove it.
1549static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1550                                          llvm::Value *result) {
1551  // This is only applicable to a method with an immutable 'self'.
1552  const ObjCMethodDecl *method =
1553    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1554  if (!method) return 0;
1555  const VarDecl *self = method->getSelfDecl();
1556  if (!self->getType().isConstQualified()) return 0;
1557
1558  // Look for a retain call.
1559  llvm::CallInst *retainCall =
1560    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1561  if (!retainCall ||
1562      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1563    return 0;
1564
1565  // Look for an ordinary load of 'self'.
1566  llvm::Value *retainedValue = retainCall->getArgOperand(0);
1567  llvm::LoadInst *load =
1568    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1569  if (!load || load->isAtomic() || load->isVolatile() ||
1570      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1571    return 0;
1572
1573  // Okay!  Burn it all down.  This relies for correctness on the
1574  // assumption that the retain is emitted as part of the return and
1575  // that thereafter everything is used "linearly".
1576  llvm::Type *resultType = result->getType();
1577  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1578  assert(retainCall->use_empty());
1579  retainCall->eraseFromParent();
1580  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1581
1582  return CGF.Builder.CreateBitCast(load, resultType);
1583}
1584
1585/// Emit an ARC autorelease of the result of a function.
1586///
1587/// \return the value to actually return from the function
1588static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1589                                            llvm::Value *result) {
1590  // If we're returning 'self', kill the initial retain.  This is a
1591  // heuristic attempt to "encourage correctness" in the really unfortunate
1592  // case where we have a return of self during a dealloc and we desperately
1593  // need to avoid the possible autorelease.
1594  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1595    return self;
1596
1597  // At -O0, try to emit a fused retain/autorelease.
1598  if (CGF.shouldUseFusedARCCalls())
1599    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1600      return fused;
1601
1602  return CGF.EmitARCAutoreleaseReturnValue(result);
1603}
1604
1605/// Heuristically search for a dominating store to the return-value slot.
1606static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1607  // If there are multiple uses of the return-value slot, just check
1608  // for something immediately preceding the IP.  Sometimes this can
1609  // happen with how we generate implicit-returns; it can also happen
1610  // with noreturn cleanups.
1611  if (!CGF.ReturnValue->hasOneUse()) {
1612    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1613    if (IP->empty()) return 0;
1614    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1615    if (!store) return 0;
1616    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1617    assert(!store->isAtomic() && !store->isVolatile()); // see below
1618    return store;
1619  }
1620
1621  llvm::StoreInst *store =
1622    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1623  if (!store) return 0;
1624
1625  // These aren't actually possible for non-coerced returns, and we
1626  // only care about non-coerced returns on this code path.
1627  assert(!store->isAtomic() && !store->isVolatile());
1628
1629  // Now do a quick-and-dirty dominance check: just walk up the
1630  // single-predecessors chain from the current insertion point.
1631  llvm::BasicBlock *StoreBB = store->getParent();
1632  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1633  while (IP != StoreBB) {
1634    if (!(IP = IP->getSinglePredecessor()))
1635      return 0;
1636  }
1637
1638  // Okay, the store's basic block dominates the insertion point; we
1639  // can do our thing.
1640  return store;
1641}
1642
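// The dominance check above exploits a cheap special case: if every block
// between the insertion point and the store's block has exactly one
// predecessor, the store's block trivially dominates the insertion point.
// A freestanding sketch over a hypothetical block type (not llvm::BasicBlock):
namespace {
struct SketchBlock {
  SketchBlock *singlePred;    // 0 if the block has zero or many predecessors
};
bool dominatesViaUniquePath(SketchBlock *storeBB, SketchBlock *ip) {
  while (ip != storeBB) {
    ip = ip->singlePred;      // walk up the single-predecessor chain
    if (!ip) return false;    // hit a merge point or the entry; give up
  }
  return true;
}
}
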
1643void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1644                                         bool EmitRetDbgLoc) {
1645  // Functions with no result always return void.
1646  if (ReturnValue == 0) {
1647    Builder.CreateRetVoid();
1648    return;
1649  }
1650
1651  llvm::DebugLoc RetDbgLoc;
1652  llvm::Value *RV = 0;
1653  QualType RetTy = FI.getReturnType();
1654  const ABIArgInfo &RetAI = FI.getReturnInfo();
1655
1656  switch (RetAI.getKind()) {
1657  case ABIArgInfo::Indirect: {
1658    switch (getEvaluationKind(RetTy)) {
1659    case TEK_Complex: {
1660      ComplexPairTy RT =
1661        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
1662      EmitStoreOfComplex(RT,
1663                       MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1664                         /*isInit*/ true);
1665      break;
1666    }
1667    case TEK_Aggregate:
1668      // Do nothing; aggregates get evaluated directly into the destination.
1669      break;
1670    case TEK_Scalar:
1671      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1672                        MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1673                        /*isInit*/ true);
1674      break;
1675    }
1676    break;
1677  }
1678
1679  case ABIArgInfo::Extend:
1680  case ABIArgInfo::Direct:
1681    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1682        RetAI.getDirectOffset() == 0) {
1683      // The internal return value temp will always have pointer-to-return-type
1684      // type; just do a load.
1685
1686      // If there is a dominating store to ReturnValue, we can elide
1687      // the load, zap the store, and usually zap the alloca.
1688      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1689        // Reuse the debug location from the store unless there is
1690        // cleanup code to be emitted between the store and return
1691        // instruction.
1692        if (EmitRetDbgLoc && !AutoreleaseResult)
1693          RetDbgLoc = SI->getDebugLoc();
1694        // Get the stored value and nuke the now-dead store.
1695        RV = SI->getValueOperand();
1696        SI->eraseFromParent();
1697
1698        // If that was the only use of the return value, nuke it as well now.
1699        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1700          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1701          ReturnValue = 0;
1702        }
1703
1704      // Otherwise, we have to do a simple load.
1705      } else {
1706        RV = Builder.CreateLoad(ReturnValue);
1707      }
1708    } else {
1709      llvm::Value *V = ReturnValue;
1710      // If the value is offset in memory, apply the offset now.
1711      if (unsigned Offs = RetAI.getDirectOffset()) {
1712        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1713        V = Builder.CreateConstGEP1_32(V, Offs);
1714        V = Builder.CreateBitCast(V,
1715                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1716      }
1717
1718      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1719    }
1720
1721    // In ARC, end functions that return a retainable type with a call
1722    // to objc_autoreleaseReturnValue.
1723    if (AutoreleaseResult) {
1724      assert(getLangOpts().ObjCAutoRefCount &&
1725             !FI.isReturnsRetained() &&
1726             RetTy->isObjCRetainableType());
1727      RV = emitAutoreleaseOfResult(*this, RV);
1728    }
1729
1730    break;
1731
1732  case ABIArgInfo::Ignore:
1733    break;
1734
1735  case ABIArgInfo::Expand:
1736    llvm_unreachable("Invalid ABI kind for return argument");
1737  }
1738
1739  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1740  if (!RetDbgLoc.isUnknown())
1741    Ret->setDebugLoc(RetDbgLoc);
1742}
1743
1744void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1745                                          const VarDecl *param) {
1746  // StartFunction converted the ABI-lowered parameter(s) into a
1747  // local alloca.  We need to turn that into an r-value suitable
1748  // for EmitCall.
1749  llvm::Value *local = GetAddrOfLocalVar(param);
1750
1751  QualType type = param->getType();
1752
1753  // For the most part, we just need to load the alloca, except:
1754  // 1) aggregate r-values are actually pointers to temporaries, and
1755  // 2) references to non-scalars are pointers directly to the aggregate.
1756  // I don't know why references to scalars are different here.
1757  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1758    if (!hasScalarEvaluationKind(ref->getPointeeType()))
1759      return args.add(RValue::getAggregate(local), type);
1760
1761    // Locals which are references to scalars are represented
1762    // with allocas holding the pointer.
1763    return args.add(RValue::get(Builder.CreateLoad(local)), type);
1764  }
1765
1766  args.add(convertTempToRValue(local, type), type);
1767}
1768
1769static bool isProvablyNull(llvm::Value *addr) {
1770  return isa<llvm::ConstantPointerNull>(addr);
1771}
1772
1773static bool isProvablyNonNull(llvm::Value *addr) {
1774  return isa<llvm::AllocaInst>(addr);
1775}
1776
1777/// Emit the actual writing-back of a writeback.
1778static void emitWriteback(CodeGenFunction &CGF,
1779                          const CallArgList::Writeback &writeback) {
1780  const LValue &srcLV = writeback.Source;
1781  llvm::Value *srcAddr = srcLV.getAddress();
1782  assert(!isProvablyNull(srcAddr) &&
1783         "shouldn't have writeback for provably null argument");
1784
1785  llvm::BasicBlock *contBB = 0;
1786
1787  // If the argument wasn't provably non-null, we need to null check
1788  // before doing the store.
1789  bool provablyNonNull = isProvablyNonNull(srcAddr);
1790  if (!provablyNonNull) {
1791    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1792    contBB = CGF.createBasicBlock("icr.done");
1793
1794    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1795    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1796    CGF.EmitBlock(writebackBB);
1797  }
1798
1799  // Load the value to writeback.
1800  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1801
1802  // Cast it back, in case we're writing an id to a Foo* or something.
1803  value = CGF.Builder.CreateBitCast(value,
1804               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1805                            "icr.writeback-cast");
1806
1807  // Perform the writeback.
1808
1809  // If we have a "to use" value, it's something we need to emit a use
1810  // of.  This has to be carefully threaded in: if it's done after the
1811  // release it's potentially undefined behavior (and the optimizer
1812  // will ignore it), and if it happens before the retain then the
1813  // optimizer could move the release there.
1814  if (writeback.ToUse) {
1815    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
1816
1817    // Retain the new value.  No need to block-copy here:  the block's
1818    // being passed up the stack.
1819    value = CGF.EmitARCRetainNonBlock(value);
1820
1821    // Emit the intrinsic use here.
1822    CGF.EmitARCIntrinsicUse(writeback.ToUse);
1823
1824    // Load the old value (primitively).
1825    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);
1826
1827    // Put the new value in place (primitively).
1828    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
1829
1830    // Release the old value.
1831    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
1832
1833  // Otherwise, we can just do a normal lvalue store.
1834  } else {
1835    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
1836  }
1837
1838  // Jump to the continuation block.
1839  if (!provablyNonNull)
1840    CGF.EmitBlock(contBB);
1841}
1842
1843static void emitWritebacks(CodeGenFunction &CGF,
1844                           const CallArgList &args) {
1845  for (CallArgList::writeback_iterator
1846         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1847    emitWriteback(CGF, *i);
1848}
1849
1850static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
1851                                            const CallArgList &CallArgs) {
1852  assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
1853  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
1854    CallArgs.getCleanupsToDeactivate();
1855  // Iterate in reverse to increase the likelihood of popping the cleanup.
1856  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
1857         I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
1858    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
1859    I->IsActiveIP->eraseFromParent();
1860  }
1861}
1862
1863static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
1864  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
1865    if (uop->getOpcode() == UO_AddrOf)
1866      return uop->getSubExpr();
1867  return 0;
1868}
1869
1870 /// Emit an argument that's being passed call-by-writeback.  That is, we
1871 /// are passing the address of a temporary that is written back after the call.
1872static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1873                             const ObjCIndirectCopyRestoreExpr *CRE) {
1874  LValue srcLV;
1875
1876  // Make an optimistic effort to emit the address as an l-value.
1877  // This can fail if the argument expression is more complicated.
1878  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
1879    srcLV = CGF.EmitLValue(lvExpr);
1880
1881  // Otherwise, just emit it as a scalar.
1882  } else {
1883    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1884
1885    QualType srcAddrType =
1886      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1887    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
1888  }
1889  llvm::Value *srcAddr = srcLV.getAddress();
1890
1891  // The dest and src types don't necessarily match in LLVM terms
1892  // because of the crazy ObjC compatibility rules.
1893
1894  llvm::PointerType *destType =
1895    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1896
1897  // If the address is a constant null, just pass the appropriate null.
1898  if (isProvablyNull(srcAddr)) {
1899    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1900             CRE->getType());
1901    return;
1902  }
1903
1904  // Create the temporary.
1905  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1906                                           "icr.temp");
1907  // Loading an l-value can introduce a cleanup if the l-value is __weak,
1908  // and that cleanup will be conditional if we can't prove that the l-value
1909  // isn't null, so we need to register a dominating point so that the cleanups
1910  // system will make valid IR.
1911  CodeGenFunction::ConditionalEvaluation condEval(CGF);
1912
1913  // Zero-initialize it if we're not doing a copy-initialization.
1914  bool shouldCopy = CRE->shouldCopy();
1915  if (!shouldCopy) {
1916    llvm::Value *null =
1917      llvm::ConstantPointerNull::get(
1918        cast<llvm::PointerType>(destType->getElementType()));
1919    CGF.Builder.CreateStore(null, temp);
1920  }
1921
1922  llvm::BasicBlock *contBB = 0;
1923  llvm::BasicBlock *originBB = 0;
1924
1925  // If the address is *not* known to be non-null, we need to switch.
1926  llvm::Value *finalArgument;
1927
1928  bool provablyNonNull = isProvablyNonNull(srcAddr);
1929  if (provablyNonNull) {
1930    finalArgument = temp;
1931  } else {
1932    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1933
1934    finalArgument = CGF.Builder.CreateSelect(isNull,
1935                                   llvm::ConstantPointerNull::get(destType),
1936                                             temp, "icr.argument");
1937
1938    // If we need to copy, then the load has to be conditional, which
1939    // means we need control flow.
1940    if (shouldCopy) {
1941      originBB = CGF.Builder.GetInsertBlock();
1942      contBB = CGF.createBasicBlock("icr.cont");
1943      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1944      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1945      CGF.EmitBlock(copyBB);
1946      condEval.begin(CGF);
1947    }
1948  }
1949
1950  llvm::Value *valueToUse = 0;
1951
1952  // Perform a copy if necessary.
1953  if (shouldCopy) {
1954    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1955    assert(srcRV.isScalar());
1956
1957    llvm::Value *src = srcRV.getScalarVal();
1958    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1959                                    "icr.cast");
1960
1961    // Use an ordinary store, not a store-to-lvalue.
1962    CGF.Builder.CreateStore(src, temp);
1963
1964    // If optimization is enabled, and the value was held in a
1965    // __strong variable, we need to tell the optimizer that this
1966    // value has to stay alive until we're doing the store back.
1967    // This is because the temporary is effectively unretained,
1968    // and so otherwise we can violate the high-level semantics.
1969    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1970        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
1971      valueToUse = src;
1972    }
1973  }
1974
1975  // Finish the control flow if we needed it.
1976  if (shouldCopy && !provablyNonNull) {
1977    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
1978    CGF.EmitBlock(contBB);
1979
1980    // Make a phi for the value to intrinsically use.
1981    if (valueToUse) {
1982      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
1983                                                      "icr.to-use");
1984      phiToUse->addIncoming(valueToUse, copyBB);
1985      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
1986                            originBB);
1987      valueToUse = phiToUse;
1988    }
1989
1990    condEval.end(CGF);
1991  }
1992
1993  args.addWriteback(srcLV, temp, valueToUse);
1994  args.add(RValue::get(finalArgument), CRE->getType());
1995}
1996
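// In source-level terms, the machinery above implements copy-in/copy-out
// for a "pass the address of" argument.  A hypothetical sketch in plain
// C++, without the ARC retains/releases or the conditional control flow:
namespace {
void writebackCallee(int *out) { *out = 42; }
void copyInCopyOutSketch(int *arg) {
  if (!arg) { writebackCallee(0); return; }   // a null address propagates
  int temp = *arg;           // copy in (guarded by shouldCopy above)
  writebackCallee(&temp);    // the callee only ever sees the temporary
  *arg = temp;               // the writeback, after the call returns
}
}
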
1997void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1998                                  QualType type) {
1999  if (const ObjCIndirectCopyRestoreExpr *CRE
2000        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2001    assert(getLangOpts().ObjCAutoRefCount);
2002    assert(getContext().hasSameType(E->getType(), type));
2003    return emitWritebackArg(*this, args, CRE);
2004  }
2005
2006  assert(type->isReferenceType() == E->isGLValue() &&
2007         "reference binding to unmaterialized r-value!");
2008
2009  if (E->isGLValue()) {
2010    assert(E->getObjectKind() == OK_Ordinary);
2011    return args.add(EmitReferenceBindingToExpr(E), type);
2012  }
2013
2014  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2015
2016  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2017  // However, we still have to push an EH-only cleanup in case we unwind before
2018  // we make it to the call.
2019  if (HasAggregateEvalKind &&
2020      CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
2021    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2022    if (RD && RD->hasNonTrivialDestructor()) {
2023      AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
2024      Slot.setExternallyDestructed();
2025      EmitAggExpr(E, Slot);
2026      RValue RV = Slot.asRValue();
2027      args.add(RV, type);
2028
2029      pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
2030                  /*useEHCleanupForArray*/ true);
2031      // This unreachable is a temporary marker which will be removed later.
2032      llvm::Instruction *IsActive = Builder.CreateUnreachable();
2033      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2034      return;
2035    }
2036  }
2037
2038  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2039      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2040    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2041    assert(L.isSimple());
2042    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2043      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2044    } else {
2045      // We can't represent a misaligned lvalue in the CallArgList, so copy
2046      // to an aligned temporary now.
2047      llvm::Value *tmp = CreateMemTemp(type);
2048      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2049                        L.getAlignment());
2050      args.add(RValue::getAggregate(tmp), type);
2051    }
2052    return;
2053  }
2054
2055  args.add(EmitAnyExprToTemp(E), type);
2056}
2057
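// The callee-destructed-argument dance above, reduced to source-level
// terms: the caller must destroy an already-built argument if unwinding
// happens before the call, but not afterwards, because the callee does.
// A hypothetical sketch with an explicit flag standing in for cleanup
// deactivation (plain C++ with exceptions, purely for illustration):
#include <new>
namespace {
struct SketchArg { ~SketchArg() {} };
void calleeDestroysSketch(SketchArg *a) { a->~SketchArg(); }
void callerSketch(SketchArg *slot) {
  new (slot) SketchArg();          // construct the argument in its slot
  bool calleeOwns = false;
  try {
    // ... evaluate the remaining arguments, which may throw ...
    calleeOwns = true;             // the "deactivate cleanup" point
    calleeDestroysSketch(slot);    // ownership transfers with the call
  } catch (...) {
    if (!calleeOwns) slot->~SketchArg();  // the EH-only cleanup
    throw;
  }
}
}
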
2058// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2059// optimizer it can aggressively ignore unwind edges.
2060void
2061CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
2062  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2063      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
2064    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
2065                      CGM.getNoObjCARCExceptionsMetadata());
2066}
2067
2068/// Emits a call to the given no-arguments nounwind runtime function.
2069llvm::CallInst *
2070CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2071                                         const llvm::Twine &name) {
2072  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2073}
2074
2075/// Emits a call to the given nounwind runtime function.
2076llvm::CallInst *
2077CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2078                                         ArrayRef<llvm::Value*> args,
2079                                         const llvm::Twine &name) {
2080  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
2081  call->setDoesNotThrow();
2082  return call;
2083}
2084
2085/// Emits a simple call (never an invoke) to the given no-arguments
2086/// runtime function.
2087llvm::CallInst *
2088CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2089                                 const llvm::Twine &name) {
2090  return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2091}
2092
2093/// Emits a simple call (never an invoke) to the given runtime
2094/// function.
2095llvm::CallInst *
2096CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2097                                 ArrayRef<llvm::Value*> args,
2098                                 const llvm::Twine &name) {
2099  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
2100  call->setCallingConv(getRuntimeCC());
2101  return call;
2102}
2103
2104/// Emits a call or invoke to the given noreturn runtime function.
2105void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
2106                                               ArrayRef<llvm::Value*> args) {
2107  if (getInvokeDest()) {
2108    llvm::InvokeInst *invoke =
2109      Builder.CreateInvoke(callee,
2110                           getUnreachableBlock(),
2111                           getInvokeDest(),
2112                           args);
2113    invoke->setDoesNotReturn();
2114    invoke->setCallingConv(getRuntimeCC());
2115  } else {
2116    llvm::CallInst *call = Builder.CreateCall(callee, args);
2117    call->setDoesNotReturn();
2118    call->setCallingConv(getRuntimeCC());
2119    Builder.CreateUnreachable();
2120  }
2121}
2122
2123/// Emits a call or invoke instruction to the given nullary runtime
2124/// function.
2125llvm::CallSite
2126CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2127                                         const Twine &name) {
2128  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
2129}
2130
2131/// Emits a call or invoke instruction to the given runtime function.
2132llvm::CallSite
2133CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2134                                         ArrayRef<llvm::Value*> args,
2135                                         const Twine &name) {
2136  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
2137  callSite.setCallingConv(getRuntimeCC());
2138  return callSite;
2139}
2140
2141llvm::CallSite
2142CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2143                                  const Twine &Name) {
2144  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
2145}
2146
2147/// Emits a call or invoke instruction to the given function, depending
2148/// on the current state of the EH stack.
2149llvm::CallSite
2150CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2151                                  ArrayRef<llvm::Value *> Args,
2152                                  const Twine &Name) {
2153  llvm::BasicBlock *InvokeDest = getInvokeDest();
2154
2155  llvm::Instruction *Inst;
2156  if (!InvokeDest)
2157    Inst = Builder.CreateCall(Callee, Args, Name);
2158  else {
2159    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
2160    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
2161    EmitBlock(ContBB);
2162  }
2163
2164  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2165  // optimizer it can aggressively ignore unwind edges.
2166  if (CGM.getLangOpts().ObjCAutoRefCount)
2167    AddObjCARCExceptionMetadata(Inst);
2168
2169  return Inst;
2170}
2171
2172static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
2173                            llvm::FunctionType *FTy) {
2174  if (ArgNo < FTy->getNumParams())
2175    assert(Elt->getType() == FTy->getParamType(ArgNo));
2176  else
2177    assert(FTy->isVarArg());
2178  ++ArgNo;
2179}
2180
2181void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
2182                                       SmallVectorImpl<llvm::Value *> &Args,
2183                                       llvm::FunctionType *IRFuncTy) {
2184  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2185    unsigned NumElts = AT->getSize().getZExtValue();
2186    QualType EltTy = AT->getElementType();
2187    llvm::Value *Addr = RV.getAggregateAddr();
2188    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2189      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
2190      RValue EltRV = convertTempToRValue(EltAddr, EltTy);
2191      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
2192    }
2193  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2194    RecordDecl *RD = RT->getDecl();
2195    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
2196    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
2197
2198    if (RD->isUnion()) {
2199      const FieldDecl *LargestFD = 0;
2200      CharUnits UnionSize = CharUnits::Zero();
2201
2202      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2203           i != e; ++i) {
2204        const FieldDecl *FD = *i;
2205        assert(!FD->isBitField() &&
2206               "Cannot expand structure with bit-field members.");
2207        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
2208        if (UnionSize < FieldSize) {
2209          UnionSize = FieldSize;
2210          LargestFD = FD;
2211        }
2212      }
2213      if (LargestFD) {
2214        RValue FldRV = EmitRValueForField(LV, LargestFD);
2215        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
2216      }
2217    } else {
2218      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2219           i != e; ++i) {
2220        FieldDecl *FD = *i;
2221
2222        RValue FldRV = EmitRValueForField(LV, FD);
2223        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
2224      }
2225    }
2226  } else if (Ty->isAnyComplexType()) {
2227    ComplexPairTy CV = RV.getComplexVal();
2228    Args.push_back(CV.first);
2229    Args.push_back(CV.second);
2230  } else {
2231    assert(RV.isScalar() &&
2232           "Unexpected non-scalar rvalue during struct expansion.");
2233
2234    // Insert a bitcast as needed.
2235    llvm::Value *V = RV.getScalarVal();
2236    if (Args.size() < IRFuncTy->getNumParams() &&
2237        V->getType() != IRFuncTy->getParamType(Args.size()))
2238      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
2239
2240    Args.push_back(V);
2241  }
2242}
2243
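// The recursive shape of ExpandTypeToArgs, sketched over a toy type model
// (hypothetical, not the Clang type system): aggregates recurse into their
// members, and every scalar leaf contributes one flattened argument.
#include <vector>
namespace {
struct SketchType {
  std::vector<const SketchType *> members;    // empty => scalar leaf
};
void expandSketch(const SketchType *T,
                  SmallVectorImpl<const SketchType *> &Args) {
  if (T->members.empty()) {
    Args.push_back(T);                        // scalar: one IR argument
    return;
  }
  for (unsigned i = 0, e = T->members.size(); i != e; ++i)
    expandSketch(T->members[i], Args);        // aggregate: flatten members
}
}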
2244
2245RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2246                                 llvm::Value *Callee,
2247                                 ReturnValueSlot ReturnValue,
2248                                 const CallArgList &CallArgs,
2249                                 const Decl *TargetDecl,
2250                                 llvm::Instruction **callOrInvoke) {
2251  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2252  SmallVector<llvm::Value*, 16> Args;
2253
2254  // Handle struct-return functions by passing a pointer to the
2255  // location that we would like to return into.
2256  QualType RetTy = CallInfo.getReturnType();
2257  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2258
2259  // IRArgNo - Keep track of the argument number in the callee we're looking at.
2260  unsigned IRArgNo = 0;
2261  llvm::FunctionType *IRFuncTy =
2262    cast<llvm::FunctionType>(
2263                  cast<llvm::PointerType>(Callee->getType())->getElementType());
2264
2265  // If the call returns a temporary with struct return, create a temporary
2266  // alloca to hold the result, unless one is given to us.
2267  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2268    llvm::Value *Value = ReturnValue.getValue();
2269    if (!Value)
2270      Value = CreateMemTemp(RetTy);
2271    Args.push_back(Value);
2272    checkArgMatches(Value, IRArgNo, IRFuncTy);
2273  }
2274
2275  assert(CallInfo.arg_size() == CallArgs.size() &&
2276         "Mismatch between function signature & arguments.");
2277  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2278  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2279       I != E; ++I, ++info_it) {
2280    const ABIArgInfo &ArgInfo = info_it->info;
2281    RValue RV = I->RV;
2282
2283    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
2284
2285    // Insert a padding argument to ensure proper alignment.
2286    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2287      Args.push_back(llvm::UndefValue::get(PaddingType));
2288      ++IRArgNo;
2289    }
2290
2291    switch (ArgInfo.getKind()) {
2292    case ABIArgInfo::Indirect: {
2293      if (RV.isScalar() || RV.isComplex()) {
2294        // Make a temporary alloca to pass the argument.
2295        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2296        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2297          AI->setAlignment(ArgInfo.getIndirectAlign());
2298        Args.push_back(AI);
2299
2300        LValue argLV =
2301          MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
2302
2303        if (RV.isScalar())
2304          EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
2305        else
2306          EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
2307
2308        // Validate argument match.
2309        checkArgMatches(AI, IRArgNo, IRFuncTy);
2310      } else {
2311        // We want to avoid creating an unnecessary temporary+copy here;
2312        // however, we need one in three cases:
2313        // 1. If the argument is not byval, and we are required to copy the
2314        //    source.  (This case doesn't occur on any common architecture.)
2315        // 2. If the argument is byval, RV is not sufficiently aligned, and
2316        //    we cannot force it to be sufficiently aligned.
2317        // 3. If the argument is byval, but RV is located in an address space
2318        //    different than that of the argument (0).
2319        llvm::Value *Addr = RV.getAggregateAddr();
2320        unsigned Align = ArgInfo.getIndirectAlign();
2321        const llvm::DataLayout *TD = &CGM.getDataLayout();
2322        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
2323        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
2324          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
2325        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2326            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
2327             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
2328            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
2329          // Create an aligned temporary, and copy to it.
2330          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2331          if (Align > AI->getAlignment())
2332            AI->setAlignment(Align);
2333          Args.push_back(AI);
2334          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2335
2336          // Validate argument match.
2337          checkArgMatches(AI, IRArgNo, IRFuncTy);
2338        } else {
2339          // Skip the extra memcpy call.
2340          Args.push_back(Addr);
2341
2342          // Validate argument match.
2343          checkArgMatches(Addr, IRArgNo, IRFuncTy);
2344        }
2345      }
2346      break;
2347    }
2348
2349    case ABIArgInfo::Ignore:
2350      break;
2351
2352    case ABIArgInfo::Extend:
2353    case ABIArgInfo::Direct: {
2354      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2355          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2356          ArgInfo.getDirectOffset() == 0) {
2357        llvm::Value *V;
2358        if (RV.isScalar())
2359          V = RV.getScalarVal();
2360        else
2361          V = Builder.CreateLoad(RV.getAggregateAddr());
2362
2363        // If the argument doesn't match, perform a bitcast to coerce it.  This
2364        // can happen due to trivial type mismatches.
2365        if (IRArgNo < IRFuncTy->getNumParams() &&
2366            V->getType() != IRFuncTy->getParamType(IRArgNo))
2367          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2368        Args.push_back(V);
2369
2370        checkArgMatches(V, IRArgNo, IRFuncTy);
2371        break;
2372      }
2373
2374      // FIXME: Avoid the conversion through memory if possible.
2375      llvm::Value *SrcPtr;
2376      if (RV.isScalar() || RV.isComplex()) {
2377        SrcPtr = CreateMemTemp(I->Ty, "coerce");
2378        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2379        if (RV.isScalar()) {
2380          EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
2381        } else {
2382          EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
2383        }
2384      } else
2385        SrcPtr = RV.getAggregateAddr();
2386
2387      // If the value is offset in memory, apply the offset now.
2388      if (unsigned Offs = ArgInfo.getDirectOffset()) {
2389        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2390        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2391        SrcPtr = Builder.CreateBitCast(SrcPtr,
2392                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2393
2394      }
2395
2396      // If the coerce-to type is a first class aggregate, we flatten it and
2397      // pass the elements. Either way is semantically identical, but fast-isel
2398      // and the optimizer generally like scalar values better than FCAs.
2399      if (llvm::StructType *STy =
2400            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2401        llvm::Type *SrcTy =
2402          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2403        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2404        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2405
2406        // If the source type is smaller than the destination type of the
2407        // coerce-to logic, copy the source value into a temp alloca the size
2408        // of the destination type to allow loading all of it. The bits past
2409        // the source value are left undef.
2410        if (SrcSize < DstSize) {
2411          llvm::AllocaInst *TempAlloca
2412            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2413          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2414          SrcPtr = TempAlloca;
2415        } else {
2416          SrcPtr = Builder.CreateBitCast(SrcPtr,
2417                                         llvm::PointerType::getUnqual(STy));
2418        }
2419
2420        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2421          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2422          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2423          // We don't know what we're loading from.
2424          LI->setAlignment(1);
2425          Args.push_back(LI);
2426
2427          // Validate argument match.
2428          checkArgMatches(LI, IRArgNo, IRFuncTy);
2429        }
2430      } else {
2431        // In the simple case, just pass the coerced loaded value.
2432        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2433                                         *this));
2434
2435        // Validate argument match.
2436        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2437      }
2438
2439      break;
2440    }
2441
2442    case ABIArgInfo::Expand:
2443      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2444      IRArgNo = Args.size();
2445      break;
2446    }
2447  }
2448
2449  if (!CallArgs.getCleanupsToDeactivate().empty())
2450    deactivateArgCleanupsBeforeCall(*this, CallArgs);
2451
2452  // If the callee is a bitcast of a function to a varargs pointer to function
2453  // type, check to see if we can remove the bitcast.  This handles some cases
2454  // with unprototyped functions.
2455  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2456    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2457      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2458      llvm::FunctionType *CurFT =
2459        cast<llvm::FunctionType>(CurPT->getElementType());
2460      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2461
2462      if (CE->getOpcode() == llvm::Instruction::BitCast &&
2463          ActualFT->getReturnType() == CurFT->getReturnType() &&
2464          ActualFT->getNumParams() == CurFT->getNumParams() &&
2465          ActualFT->getNumParams() == Args.size() &&
2466          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2467        bool ArgsMatch = true;
2468        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2469          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2470            ArgsMatch = false;
2471            break;
2472          }
2473
2474        // Strip the cast if we can get away with it.  This is a nice cleanup,
2475        // but also allows us to inline the function at -O0 if it is marked
2476        // always_inline.
2477        if (ArgsMatch)
2478          Callee = CalleeF;
2479      }
2480    }
2481
2482  unsigned CallingConv;
2483  CodeGen::AttributeListType AttributeList;
2484  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2485                             CallingConv, true);
2486  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2487                                                     AttributeList);
2488
2489  llvm::BasicBlock *InvokeDest = 0;
2490  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2491                          llvm::Attribute::NoUnwind))
2492    InvokeDest = getInvokeDest();
2493
2494  llvm::CallSite CS;
2495  if (!InvokeDest) {
2496    CS = Builder.CreateCall(Callee, Args);
2497  } else {
2498    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2499    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2500    EmitBlock(Cont);
2501  }
2502  if (callOrInvoke)
2503    *callOrInvoke = CS.getInstruction();
2504
2505  CS.setAttributes(Attrs);
2506  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2507
2508  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2509  // optimizer it can aggressively ignore unwind edges.
2510  if (CGM.getLangOpts().ObjCAutoRefCount)
2511    AddObjCARCExceptionMetadata(CS.getInstruction());
2512
2513  // If the call doesn't return, finish the basic block and clear the
2514  // insertion point; this allows the rest of IRgen to discard
2515  // unreachable code.
2516  if (CS.doesNotReturn()) {
2517    Builder.CreateUnreachable();
2518    Builder.ClearInsertionPoint();
2519
2520    // FIXME: For now, emit a dummy basic block because expr emitters in
2521    // general are not ready to handle emitting expressions at unreachable
2522    // points.
2523    EnsureInsertPoint();
2524
2525    // Return a reasonable RValue.
2526    return GetUndefRValue(RetTy);
2527  }
2528
2529  llvm::Instruction *CI = CS.getInstruction();
2530  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2531    CI->setName("call");
2532
2533  // Emit any writebacks immediately.  Arguably this should happen
2534  // after any return-value munging.
2535  if (CallArgs.hasWritebacks())
2536    emitWritebacks(*this, CallArgs);
2537
2538  switch (RetAI.getKind()) {
2539  case ABIArgInfo::Indirect:
2540    return convertTempToRValue(Args[0], RetTy);
2541
2542  case ABIArgInfo::Ignore:
2543    // If we are ignoring an argument that had a result, make sure to
2544    // construct the appropriate return value for our caller.
2545    return GetUndefRValue(RetTy);
2546
2547  case ABIArgInfo::Extend:
2548  case ABIArgInfo::Direct: {
2549    llvm::Type *RetIRTy = ConvertType(RetTy);
2550    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2551      switch (getEvaluationKind(RetTy)) {
2552      case TEK_Complex: {
2553        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2554        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2555        return RValue::getComplex(std::make_pair(Real, Imag));
2556      }
2557      case TEK_Aggregate: {
2558        llvm::Value *DestPtr = ReturnValue.getValue();
2559        bool DestIsVolatile = ReturnValue.isVolatile();
2560
2561        if (!DestPtr) {
2562          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2563          DestIsVolatile = false;
2564        }
2565        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2566        return RValue::getAggregate(DestPtr);
2567      }
2568      case TEK_Scalar: {
2569        // If the argument doesn't match, perform a bitcast to coerce it.  This
2570        // can happen due to trivial type mismatches.
2571        llvm::Value *V = CI;
2572        if (V->getType() != RetIRTy)
2573          V = Builder.CreateBitCast(V, RetIRTy);
2574        return RValue::get(V);
2575      }
2576      }
2577      llvm_unreachable("bad evaluation kind");
2578    }
2579
2580    llvm::Value *DestPtr = ReturnValue.getValue();
2581    bool DestIsVolatile = ReturnValue.isVolatile();
2582
2583    if (!DestPtr) {
2584      DestPtr = CreateMemTemp(RetTy, "coerce");
2585      DestIsVolatile = false;
2586    }
2587
2588    // If the value is offset in memory, apply the offset now.
2589    llvm::Value *StorePtr = DestPtr;
2590    if (unsigned Offs = RetAI.getDirectOffset()) {
2591      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2592      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2593      StorePtr = Builder.CreateBitCast(StorePtr,
2594                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2595    }
2596    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2597
2598    return convertTempToRValue(DestPtr, RetTy);
2599  }
2600
2601  case ABIArgInfo::Expand:
2602    llvm_unreachable("Invalid ABI kind for return argument");
2603  }
2604
2605  llvm_unreachable("Unhandled ABIArgInfo::Kind");
2606}
2607
2608/* VarArg handling */
2609
2610llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2611  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2612}
2613