CGCall.cpp revision 651f13cea278ec967336033dd032faef0e9fc2ec
1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "ABIInfo.h"
17#include "CGCXXABI.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "TargetInfo.h"
21#include "clang/AST/Decl.h"
22#include "clang/AST/DeclCXX.h"
23#include "clang/AST/DeclObjC.h"
24#include "clang/Basic/TargetInfo.h"
25#include "clang/CodeGen/CGFunctionInfo.h"
26#include "clang/Frontend/CodeGenOptions.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/CallSite.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/Transforms/Utils/Local.h"
34using namespace clang;
35using namespace CodeGen;
36
37/***/
38
39static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
40  switch (CC) {
41  default: return llvm::CallingConv::C;
42  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
43  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
44  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
45  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
46  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
47  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
48  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
49  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
50  // TODO: add support for CC_X86Pascal to llvm
51  }
52}
53
54/// Derives the 'this' type for codegen purposes, i.e. ignoring method
55/// qualification.
56/// FIXME: address space qualification?
57static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
58  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
59  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
60}
61
62/// Returns the canonical formal type of the given C++ method.
63static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
64  return MD->getType()->getCanonicalTypeUnqualified()
65           .getAs<FunctionProtoType>();
66}
67
68/// Returns the "extra-canonicalized" return type, which discards
69/// qualifiers on the return type.  Codegen doesn't care about them,
70/// and it makes ABI code a little easier to be able to assume that
71/// all parameter and return types are top-level unqualified.
72static CanQualType GetReturnType(QualType RetTy) {
73  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
74}
75
76/// Arrange the argument and result information for a value of the given
77/// unprototyped freestanding function type.
78const CGFunctionInfo &
79CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
80  // When translating an unprototyped function type, always use a
81  // variadic type.
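  // RequiredArgs(0) marks every argument as optional, so the resulting LLVM
  // type is effectively 'T (...)' with no fixed parameters.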
82  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
83                                 false, None, FTNP->getExtInfo(),
84                                 RequiredArgs(0));
85}
86
87/// Arrange the LLVM function layout for a value of the given function
88/// type, on top of any implicit parameters already stored.  Use the
89/// given ExtInfo instead of the ExtInfo from the function type.
90static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
91                                                     bool IsInstanceMethod,
92                                       SmallVectorImpl<CanQualType> &prefix,
93                                             CanQual<FunctionProtoType> FTP,
94                                              FunctionType::ExtInfo extInfo) {
95  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
96  // FIXME: Kill copy.
97  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
98    prefix.push_back(FTP->getParamType(i));
99  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
100  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
101                                     extInfo, required);
102}
103
104/// Arrange the argument and result information for a free function (i.e.
105/// not a C++ or ObjC instance method) of the given type.
106static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
107                                      SmallVectorImpl<CanQualType> &prefix,
108                                            CanQual<FunctionProtoType> FTP) {
109  return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
110}
111
112/// Arrange the argument and result information for a non-static C++ method
113/// of the given type; the implicit 'this' parameter is already in the prefix.
114static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
115                                      SmallVectorImpl<CanQualType> &prefix,
116                                            CanQual<FunctionProtoType> FTP) {
117  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
118  return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
119}
120
121/// Arrange the argument and result information for a value of the
122/// given freestanding function type.
123const CGFunctionInfo &
124CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
125  SmallVector<CanQualType, 16> argTypes;
126  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
127}
128
129static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
130  // Set the appropriate calling convention for the Function.
131  if (D->hasAttr<StdCallAttr>())
132    return CC_X86StdCall;
133
134  if (D->hasAttr<FastCallAttr>())
135    return CC_X86FastCall;
136
137  if (D->hasAttr<ThisCallAttr>())
138    return CC_X86ThisCall;
139
140  if (D->hasAttr<PascalAttr>())
141    return CC_X86Pascal;
142
143  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
144    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
145
146  if (D->hasAttr<PnaclCallAttr>())
147    return CC_PnaclCall;
148
149  if (D->hasAttr<IntelOclBiccAttr>())
150    return CC_IntelOclBicc;
151
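  // ms_abi and sysv_abi devolve to the plain C convention when they name the
  // calling convention the target already uses by default.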
152  if (D->hasAttr<MSABIAttr>())
153    return IsWindows ? CC_C : CC_X86_64Win64;
154
155  if (D->hasAttr<SysVABIAttr>())
156    return IsWindows ? CC_X86_64SysV : CC_C;
157
158  return CC_C;
159}
160
161/// Arrange the argument and result information for a call to an
162/// unknown C++ non-static member function of the given abstract type.
163/// (A null RD means we don't have any meaningful "this" argument type,
164///  so fall back to a generic pointer type).
165/// The member function must be an ordinary function, i.e. not a
166/// constructor or destructor.
167const CGFunctionInfo &
168CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
169                                   const FunctionProtoType *FTP) {
170  SmallVector<CanQualType, 16> argTypes;
171
172  // Add the 'this' pointer.
173  if (RD)
174    argTypes.push_back(GetThisType(Context, RD));
175  else
176    argTypes.push_back(Context.VoidPtrTy);
177
178  return ::arrangeCXXMethodType(*this, argTypes,
179              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
180}
181
182/// Arrange the argument and result information for a declaration or
183/// definition of the given C++ non-static member function.  The
184/// member function must be an ordinary function, i.e. not a
185/// constructor or destructor.
186const CGFunctionInfo &
187CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
188  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
189  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
190
191  CanQual<FunctionProtoType> prototype = GetFormalType(MD);
192
193  if (MD->isInstance()) {
194    // The abstract case is perfectly fine.
195    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
196    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
197  }
198
199  return arrangeFreeFunctionType(prototype);
200}
201
202/// Arrange the argument and result information for a declaration
203/// or definition to the given constructor variant.
204const CGFunctionInfo &
205CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
206                                               CXXCtorType ctorKind) {
207  SmallVector<CanQualType, 16> argTypes;
208  argTypes.push_back(GetThisType(Context, D->getParent()));
209
210  GlobalDecl GD(D, ctorKind);
211  CanQualType resultType =
212    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
213
214  CanQual<FunctionProtoType> FTP = GetFormalType(D);
215
216  // Add the formal parameters.
217  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
218    argTypes.push_back(FTP->getParamType(i));
219
220  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
221
222  RequiredArgs required =
223      (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
224
225  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
226  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
227}
228
229/// Arrange a call to a C++ method, passing the given arguments.
230const CGFunctionInfo &
231CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
232                                        const CXXConstructorDecl *D,
233                                        CXXCtorType CtorKind,
234                                        unsigned ExtraArgs) {
235  // FIXME: Kill copy.
236  SmallVector<CanQualType, 16> ArgTypes;
237  for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
238       ++i)
239    ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));
240
241  CanQual<FunctionProtoType> FPT = GetFormalType(D);
242  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
243  GlobalDecl GD(D, CtorKind);
244  CanQualType ResultType =
245      TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;
246
247  FunctionType::ExtInfo Info = FPT->getExtInfo();
248  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
249}
250
251/// Arrange the argument and result information for a declaration,
252/// definition, or call to the given destructor variant.  It so
253/// happens that all three cases produce the same information.
254const CGFunctionInfo &
255CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
256                                   CXXDtorType dtorKind) {
257  SmallVector<CanQualType, 2> argTypes;
258  argTypes.push_back(GetThisType(Context, D->getParent()));
259
260  GlobalDecl GD(D, dtorKind);
261  CanQualType resultType =
262    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
263
264  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
265
266  CanQual<FunctionProtoType> FTP = GetFormalType(D);
267  assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
268  assert(!FTP->isVariadic() && "dtor with variadic arguments");
269
270  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
271  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
272                                 RequiredArgs::All);
273}
274
275/// Arrange the argument and result information for the declaration or
276/// definition of the given function.
277const CGFunctionInfo &
278CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
279  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
280    if (MD->isInstance())
281      return arrangeCXXMethodDeclaration(MD);
282
283  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
284
285  assert(isa<FunctionType>(FTy));
286
287  // When declaring a function without a prototype, always use a
288  // non-variadic type.
289  if (isa<FunctionNoProtoType>(FTy)) {
290    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
291    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
292                                   noProto->getExtInfo(), RequiredArgs::All);
293  }
294
295  assert(isa<FunctionProtoType>(FTy));
296  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
297}
298
299/// Arrange the argument and result information for the declaration or
300/// definition of an Objective-C method.
301const CGFunctionInfo &
302CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
303  // It happens that this is the same as a call with no optional
304  // arguments, except also using the formal 'self' type.
305  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
306}
307
308/// Arrange the argument and result information for the function type
309/// through which to perform a send to the given Objective-C method,
310/// using the given receiver type.  The receiver type is not always
311/// the 'self' type of the method or even an Objective-C pointer type.
312/// This is *not* the right method for actually performing such a
313/// message send, due to the possibility of optional arguments.
314const CGFunctionInfo &
315CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
316                                              QualType receiverType) {
317  SmallVector<CanQualType, 16> argTys;
318  argTys.push_back(Context.getCanonicalParamType(receiverType));
319  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
320  // FIXME: Kill copy?
321  for (const auto *I : MD->params()) {
322    argTys.push_back(Context.getCanonicalParamType(I->getType()));
323  }
324
325  FunctionType::ExtInfo einfo;
326  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
327  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
328
329  if (getContext().getLangOpts().ObjCAutoRefCount &&
330      MD->hasAttr<NSReturnsRetainedAttr>())
331    einfo = einfo.withProducesResult(true);
332
333  RequiredArgs required =
334    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
335
336  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
337                                 argTys, einfo, required);
338}
339
340const CGFunctionInfo &
341CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
342  // FIXME: Do we need to handle ObjCMethodDecl?
343  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
344
345  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
346    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
347
348  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
349    return arrangeCXXDestructor(DD, GD.getDtorType());
350
351  return arrangeFunctionDeclaration(FD);
352}
353
354/// Arrange a call as unto a free function, except possibly with an
355/// additional number of formal parameters considered required.
356static const CGFunctionInfo &
357arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
358                            CodeGenModule &CGM,
359                            const CallArgList &args,
360                            const FunctionType *fnType,
361                            unsigned numExtraRequiredArgs) {
362  assert(args.size() >= numExtraRequiredArgs);
363
364  // In most cases, there are no optional arguments.
365  RequiredArgs required = RequiredArgs::All;
366
367  // If we have a variadic prototype, the required arguments are the
368  // extra prefix plus the arguments in the prototype.
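  // For example, a call through 'int printf(const char *, ...)' requires only
  // the format string, plus however many extra prefix arguments were requested.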
369  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
370    if (proto->isVariadic())
371      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
372
373  // If we don't have a prototype at all, but we're supposed to
374  // explicitly use the variadic convention for unprototyped calls,
375  // treat all of the arguments as required but preserve the nominal
376  // possibility of variadics.
377  } else if (CGM.getTargetCodeGenInfo()
378                .isNoProtoCallVariadic(args,
379                                       cast<FunctionNoProtoType>(fnType))) {
380    required = RequiredArgs(args.size());
381  }
382
383  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
384                                     fnType->getExtInfo(), required);
385}
386
387/// Figure out the rules for calling a function with the given formal
388/// type using the given arguments.  The arguments are necessary
389/// because the function might be unprototyped, in which case it's
390/// target-dependent in crazy ways.
391const CGFunctionInfo &
392CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
393                                      const FunctionType *fnType) {
394  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
395}
396
397/// A block function call is essentially a free-function call with an
398/// extra implicit argument.
399const CGFunctionInfo &
400CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
401                                       const FunctionType *fnType) {
402  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
403}
404
405const CGFunctionInfo &
406CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
407                                      const CallArgList &args,
408                                      FunctionType::ExtInfo info,
409                                      RequiredArgs required) {
410  // FIXME: Kill copy.
411  SmallVector<CanQualType, 16> argTypes;
412  for (CallArgList::const_iterator i = args.begin(), e = args.end();
413       i != e; ++i)
414    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
415  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
416                                 info, required);
417}
418
419/// Arrange a call to a C++ method, passing the given arguments.
420const CGFunctionInfo &
421CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
422                                   const FunctionProtoType *FPT,
423                                   RequiredArgs required) {
424  // FIXME: Kill copy.
425  SmallVector<CanQualType, 16> argTypes;
426  for (CallArgList::const_iterator i = args.begin(), e = args.end();
427       i != e; ++i)
428    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
429
430  FunctionType::ExtInfo info = FPT->getExtInfo();
431  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
432                                 argTypes, info, required);
433}
434
435const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
436    QualType resultType, const FunctionArgList &args,
437    const FunctionType::ExtInfo &info, bool isVariadic) {
438  // FIXME: Kill copy.
439  SmallVector<CanQualType, 16> argTypes;
440  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
441       i != e; ++i)
442    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
443
444  RequiredArgs required =
445    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
446  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info,
447                                 required);
448}
449
450const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
451  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
452                                 FunctionType::ExtInfo(), RequiredArgs::All);
453}
454
455/// Arrange the argument and result information for an abstract value
456/// of a given function type.  This is the method which all of the
457/// above functions ultimately defer to.
458const CGFunctionInfo &
459CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
460                                      bool IsInstanceMethod,
461                                      ArrayRef<CanQualType> argTypes,
462                                      FunctionType::ExtInfo info,
463                                      RequiredArgs required) {
464#ifndef NDEBUG
465  for (ArrayRef<CanQualType>::const_iterator
466         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
467    assert(I->isCanonicalAsParam());
468#endif
469
470  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
471
472  // Lookup or create unique function info.
473  llvm::FoldingSetNodeID ID;
474  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
475                          argTypes);
476
477  void *insertPos = 0;
478  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
479  if (FI)
480    return *FI;
481
482  // Construct the function info.  We co-allocate the ArgInfos.
483  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
484                              required);
485  FunctionInfos.InsertNode(FI, insertPos);
486
487  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
488  assert(inserted && "Recursively being processed?");
489
490  // Compute ABI information.
491  getABIInfo().computeInfo(*FI);
492
493  // Loop over all of the computed argument and return value info.  If any of
494  // them are direct or extend without a specified coerce type, specify the
495  // default now.
496  ABIArgInfo &retInfo = FI->getReturnInfo();
497  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
498    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
499
500  for (auto &I : FI->arguments())
501    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == 0)
502      I.info.setCoerceToType(ConvertType(I.type));
503
504  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
505  assert(erased && "Not in set?");
506
507  return *FI;
508}
509
510CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
511                                       bool IsInstanceMethod,
512                                       const FunctionType::ExtInfo &info,
513                                       CanQualType resultType,
514                                       ArrayRef<CanQualType> argTypes,
515                                       RequiredArgs required) {
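  // The ArgInfo entries are tail-allocated after the CGFunctionInfo itself;
  // slot 0 holds the return type and slots 1..NumArgs hold the parameters,
  // hence the '+ 1' below.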
516  void *buffer = operator new(sizeof(CGFunctionInfo) +
517                              sizeof(ArgInfo) * (argTypes.size() + 1));
518  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
519  FI->CallingConvention = llvmCC;
520  FI->EffectiveCallingConvention = llvmCC;
521  FI->ASTCallingConvention = info.getCC();
522  FI->InstanceMethod = IsInstanceMethod;
523  FI->NoReturn = info.getNoReturn();
524  FI->ReturnsRetained = info.getProducesResult();
525  FI->Required = required;
526  FI->HasRegParm = info.getHasRegParm();
527  FI->RegParm = info.getRegParm();
528  FI->ArgStruct = 0;
529  FI->NumArgs = argTypes.size();
530  FI->getArgsBuffer()[0].type = resultType;
531  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
532    FI->getArgsBuffer()[i + 1].type = argTypes[i];
533  return FI;
534}
535
536/***/
537
538void CodeGenTypes::GetExpandedTypes(QualType type,
539                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
540  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
541    uint64_t NumElts = AT->getSize().getZExtValue();
542    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
543      GetExpandedTypes(AT->getElementType(), expandedTypes);
544  } else if (const RecordType *RT = type->getAs<RecordType>()) {
545    const RecordDecl *RD = RT->getDecl();
546    assert(!RD->hasFlexibleArrayMember() &&
547           "Cannot expand structure with flexible array.");
548    if (RD->isUnion()) {
549      // Unions can be here only in degenerate cases - all the fields are the
550      // same after flattening. Thus we have to use the "largest" field.
551      const FieldDecl *LargestFD = 0;
552      CharUnits UnionSize = CharUnits::Zero();
553
554      for (const auto *FD : RD->fields()) {
555        assert(!FD->isBitField() &&
556               "Cannot expand structure with bit-field members.");
557        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
558        if (UnionSize < FieldSize) {
559          UnionSize = FieldSize;
560          LargestFD = FD;
561        }
562      }
563      if (LargestFD)
564        GetExpandedTypes(LargestFD->getType(), expandedTypes);
565    } else {
566      for (const auto *I : RD->fields()) {
567        assert(!I->isBitField() &&
568               "Cannot expand structure with bit-field members.");
569        GetExpandedTypes(I->getType(), expandedTypes);
570      }
571    }
572  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
573    llvm::Type *EltTy = ConvertType(CT->getElementType());
574    expandedTypes.push_back(EltTy);
575    expandedTypes.push_back(EltTy);
576  } else
577    expandedTypes.push_back(ConvertType(type));
578}
579
580llvm::Function::arg_iterator
581CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
582                                    llvm::Function::arg_iterator AI) {
583  assert(LV.isSimple() &&
584         "Unexpected non-simple lvalue during struct expansion.");
585
586  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
587    unsigned NumElts = AT->getSize().getZExtValue();
588    QualType EltTy = AT->getElementType();
589    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
590      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
591      LValue LV = MakeAddrLValue(EltAddr, EltTy);
592      AI = ExpandTypeFromArgs(EltTy, LV, AI);
593    }
594  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
595    RecordDecl *RD = RT->getDecl();
596    if (RD->isUnion()) {
597      // Unions can be here only in degenerate cases - all the fields are the
598      // same after flattening. Thus we have to use the "largest" field.
599      const FieldDecl *LargestFD = 0;
600      CharUnits UnionSize = CharUnits::Zero();
601
602      for (const auto *FD : RD->fields()) {
603        assert(!FD->isBitField() &&
604               "Cannot expand structure with bit-field members.");
605        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
606        if (UnionSize < FieldSize) {
607          UnionSize = FieldSize;
608          LargestFD = FD;
609        }
610      }
611      if (LargestFD) {
612        // FIXME: What are the right qualifiers here?
613        LValue SubLV = EmitLValueForField(LV, LargestFD);
614        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
615      }
616    } else {
617      for (const auto *FD : RD->fields()) {
618        QualType FT = FD->getType();
619
620        // FIXME: What are the right qualifiers here?
621        LValue SubLV = EmitLValueForField(LV, FD);
622        AI = ExpandTypeFromArgs(FT, SubLV, AI);
623      }
624    }
625  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
626    QualType EltTy = CT->getElementType();
627    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
628    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
629    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
630    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
631  } else {
632    EmitStoreThroughLValue(RValue::get(AI), LV);
633    ++AI;
634  }
635
636  return AI;
637}
638
639/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
640/// accessing some number of bytes out of it, try to gep into the struct to get
641/// at its inner goodness.  Dive as deep as possible without entering an element
642/// with an in-memory size smaller than DstSize.
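///
/// For example, with SrcSTy = { { i32, i32 }, i8 } and DstSize == 4, this digs
/// down to a pointer to the leading i32.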
643static llvm::Value *
644EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
645                                   llvm::StructType *SrcSTy,
646                                   uint64_t DstSize, CodeGenFunction &CGF) {
647  // We can't dive into a zero-element struct.
648  if (SrcSTy->getNumElements() == 0) return SrcPtr;
649
650  llvm::Type *FirstElt = SrcSTy->getElementType(0);
651
652  // If the first elt is at least as large as what we're looking for, or if the
653  // first element is the same size as the whole struct, we can enter it.
654  uint64_t FirstEltSize =
655    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
656  if (FirstEltSize < DstSize &&
657      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
658    return SrcPtr;
659
660  // GEP into the first element.
661  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
662
663  // If the first element is a struct, recurse.
664  llvm::Type *SrcTy =
665    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
666  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
667    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
668
669  return SrcPtr;
670}
671
672/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
673/// are either integers or pointers.  This does a truncation of the value if it
674/// is too large or a zero extension if it is too small.
675///
676/// This behaves as if the value were coerced through memory, so on big-endian
677/// targets the high bits are preserved in a truncation, while little-endian
678/// targets preserve the low bits.
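///
/// For example, coercing an i64 to an i32 keeps the most significant 32 bits
/// on a big-endian target (shift right, then truncate) and the least
/// significant 32 bits on a little-endian target (plain truncation).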
679static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
680                                             llvm::Type *Ty,
681                                             CodeGenFunction &CGF) {
682  if (Val->getType() == Ty)
683    return Val;
684
685  if (isa<llvm::PointerType>(Val->getType())) {
686    // If this is Pointer->Pointer avoid conversion to and from int.
687    if (isa<llvm::PointerType>(Ty))
688      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
689
690    // Convert the pointer to an integer so we can play with its width.
691    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
692  }
693
694  llvm::Type *DestIntTy = Ty;
695  if (isa<llvm::PointerType>(DestIntTy))
696    DestIntTy = CGF.IntPtrTy;
697
698  if (Val->getType() != DestIntTy) {
699    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
700    if (DL.isBigEndian()) {
701      // Preserve the high bits on big-endian targets.
702      // That is what memory coercion does.
703      uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
704      uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
705      if (SrcSize > DstSize) {
706        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
707        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
708      } else {
709        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
710        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
711      }
712    } else {
713      // Little-endian targets preserve the low bits. No shifts required.
714      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
715    }
716  }
717
718  if (isa<llvm::PointerType>(Ty))
719    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
720  return Val;
721}
722
723
724
725/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
726/// a pointer to an object of type \arg Ty.
727///
728/// This safely handles the case when the src type is smaller than the
729/// destination type; in this situation the values of bits which are not
730/// present in the src are undefined.
731static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
732                                      llvm::Type *Ty,
733                                      CodeGenFunction &CGF) {
734  llvm::Type *SrcTy =
735    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
736
737  // If SrcTy and Ty are the same, just do a load.
738  if (SrcTy == Ty)
739    return CGF.Builder.CreateLoad(SrcPtr);
740
741  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
742
743  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
744    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
745    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
746  }
747
748  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
749
750  // If the source and destination are integer or pointer types, just do an
751  // extension or truncation to the desired type.
752  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
753      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
754    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
755    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
756  }
757
758  // If load is legal, just bitcast the src pointer.
759  if (SrcSize >= DstSize) {
760    // Generally SrcSize is never greater than DstSize, since this means we are
761    // losing bits. However, this can happen in cases where the structure has
762    // additional padding, for example due to a user specified alignment.
763    //
764    // FIXME: Assert that we aren't truncating non-padding bits when we have access
765    // to that information.
766    llvm::Value *Casted =
767      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
768    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
769    // FIXME: Use better alignment / avoid requiring aligned load.
770    Load->setAlignment(1);
771    return Load;
772  }
773
774  // Otherwise do coercion through memory. This is stupid, but
775  // simple.
776  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
777  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
778  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
779  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
780  // FIXME: Use better alignment.
781  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
782      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
783      1, false);
784  return CGF.Builder.CreateLoad(Tmp);
785}
786
787// Function to store a first-class aggregate into memory.  We prefer to
788// store the elements rather than the aggregate to be more friendly to
789// fast-isel.
790// FIXME: Do we need to recurse here?
791static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
792                          llvm::Value *DestPtr, bool DestIsVolatile,
793                          bool LowAlignment) {
794  // Prefer scalar stores to first-class aggregate stores.
795  if (llvm::StructType *STy =
796        dyn_cast<llvm::StructType>(Val->getType())) {
797    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
798      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
799      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
800      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
801                                                    DestIsVolatile);
802      if (LowAlignment)
803        SI->setAlignment(1);
804    }
805  } else {
806    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
807    if (LowAlignment)
808      SI->setAlignment(1);
809  }
810}
811
812/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
813/// where the source and destination may have different types.
814///
815/// This safely handles the case when the src type is larger than the
816/// destination type; the upper bits of the src will be lost.
817static void CreateCoercedStore(llvm::Value *Src,
818                               llvm::Value *DstPtr,
819                               bool DstIsVolatile,
820                               CodeGenFunction &CGF) {
821  llvm::Type *SrcTy = Src->getType();
822  llvm::Type *DstTy =
823    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
824  if (SrcTy == DstTy) {
825    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
826    return;
827  }
828
829  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
830
831  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
832    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
833    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
834  }
835
836  // If the source and destination are integer or pointer types, just do an
837  // extension or truncation to the desired type.
838  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
839      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
840    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
841    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
842    return;
843  }
844
845  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
846
847  // If store is legal, just bitcast the src pointer.
848  if (SrcSize <= DstSize) {
849    llvm::Value *Casted =
850      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
851    // FIXME: Use better alignment / avoid requiring aligned store.
852    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
853  } else {
854    // Otherwise do coercion through memory. This is stupid, but
855    // simple.
856
857    // Generally SrcSize is never greater than DstSize, since this means we are
858    // losing bits. However, this can happen in cases where the structure has
859    // additional padding, for example due to a user specified alignment.
860    //
861    // FIXME: Assert that we aren't truncating non-padding bits when we have access
862    // to that information.
863    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
864    CGF.Builder.CreateStore(Src, Tmp);
865    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
866    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
867    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
868    // FIXME: Use better alignment.
869    CGF.Builder.CreateMemCpy(DstCasted, Casted,
870        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
871        1, false);
872  }
873}
874
875/***/
876
877bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
878  return FI.getReturnInfo().isIndirect();
879}
880
881bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
882  return ReturnTypeUsesSRet(FI) &&
883         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
884}
885
886bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
887  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
888    switch (BT->getKind()) {
889    default:
890      return false;
891    case BuiltinType::Float:
892      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
893    case BuiltinType::Double:
894      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
895    case BuiltinType::LongDouble:
896      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
897    }
898  }
899
900  return false;
901}
902
903bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
904  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
905    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
906      if (BT->getKind() == BuiltinType::LongDouble)
907        return getTarget().useObjCFP2RetForComplexLongDouble();
908    }
909  }
910
911  return false;
912}
913
914llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
915  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
916  return GetFunctionType(FI);
917}
918
919llvm::FunctionType *
920CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
921
922  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
923  assert(Inserted && "Recursively being processed?");
924
925  SmallVector<llvm::Type*, 8> argTypes;
926  llvm::Type *resultType = 0;
927
928  const ABIArgInfo &retAI = FI.getReturnInfo();
929  switch (retAI.getKind()) {
930  case ABIArgInfo::Expand:
931    llvm_unreachable("Invalid ABI kind for return argument");
932
933  case ABIArgInfo::Extend:
934  case ABIArgInfo::Direct:
935    resultType = retAI.getCoerceToType();
936    break;
937
938  case ABIArgInfo::InAlloca:
939    if (retAI.getInAllocaSRet()) {
940      // sret things on win32 aren't void, they return the sret pointer.
941      QualType ret = FI.getReturnType();
942      llvm::Type *ty = ConvertType(ret);
943      unsigned addressSpace = Context.getTargetAddressSpace(ret);
944      resultType = llvm::PointerType::get(ty, addressSpace);
945    } else {
946      resultType = llvm::Type::getVoidTy(getLLVMContext());
947    }
948    break;
949
950  case ABIArgInfo::Indirect: {
951    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
952    resultType = llvm::Type::getVoidTy(getLLVMContext());
953
954    QualType ret = FI.getReturnType();
955    llvm::Type *ty = ConvertType(ret);
956    unsigned addressSpace = Context.getTargetAddressSpace(ret);
957    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
958    break;
959  }
960
961  case ABIArgInfo::Ignore:
962    resultType = llvm::Type::getVoidTy(getLLVMContext());
963    break;
964  }
965
966  // Add in all of the required arguments.
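  // For a variadic function only the prototyped parameters get IR types here;
  // the variadic tail is covered by the trailing '...' on the LLVM function type.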
967  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
968  if (FI.isVariadic()) {
969    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
970  } else {
971    ie = FI.arg_end();
972  }
973  for (; it != ie; ++it) {
974    const ABIArgInfo &argAI = it->info;
975
976    // Insert a padding type to ensure proper alignment.
977    if (llvm::Type *PaddingType = argAI.getPaddingType())
978      argTypes.push_back(PaddingType);
979
980    switch (argAI.getKind()) {
981    case ABIArgInfo::Ignore:
982    case ABIArgInfo::InAlloca:
983      break;
984
985    case ABIArgInfo::Indirect: {
986      // indirect arguments are always on the stack, which is addr space #0.
987      llvm::Type *LTy = ConvertTypeForMem(it->type);
988      argTypes.push_back(LTy->getPointerTo());
989      break;
990    }
991
992    case ABIArgInfo::Extend:
993    case ABIArgInfo::Direct: {
994      // If the coerce-to type is a first class aggregate, flatten it.  Either
995      // way is semantically identical, but fast-isel and the optimizer
996      // generally like scalar values better than FCAs.
997      llvm::Type *argType = argAI.getCoerceToType();
998      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
999        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1000          argTypes.push_back(st->getElementType(i));
1001      } else {
1002        argTypes.push_back(argType);
1003      }
1004      break;
1005    }
1006
1007    case ABIArgInfo::Expand:
1008      GetExpandedTypes(it->type, argTypes);
1009      break;
1010    }
1011  }
1012
1013  // Add the inalloca struct as the last parameter type.
1014  if (llvm::StructType *ArgStruct = FI.getArgStruct())
1015    argTypes.push_back(ArgStruct->getPointerTo());
1016
1017  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1018  assert(Erased && "Not in set?");
1019
1020  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
1021}
1022
1023llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1024  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1025  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1026
1027  if (!isFuncTypeConvertible(FPT))
1028    return llvm::StructType::get(getLLVMContext());
1029
1030  const CGFunctionInfo *Info;
1031  if (isa<CXXDestructorDecl>(MD))
1032    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
1033  else
1034    Info = &arrangeCXXMethodDeclaration(MD);
1035  return GetFunctionType(*Info);
1036}
1037
1038void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1039                                           const Decl *TargetDecl,
1040                                           AttributeListType &PAL,
1041                                           unsigned &CallingConv,
1042                                           bool AttrOnCallSite) {
1043  llvm::AttrBuilder FuncAttrs;
1044  llvm::AttrBuilder RetAttrs;
1045
1046  CallingConv = FI.getEffectiveCallingConvention();
1047
1048  if (FI.isNoReturn())
1049    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1050
1051  // FIXME: handle sseregparm someday...
1052  if (TargetDecl) {
1053    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1054      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1055    if (TargetDecl->hasAttr<NoThrowAttr>())
1056      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1057    if (TargetDecl->hasAttr<NoReturnAttr>())
1058      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1059    if (TargetDecl->hasAttr<NoDuplicateAttr>())
1060      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1061
1062    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1063      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
1064      if (FPT && FPT->isNothrow(getContext()))
1065        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1066      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1067      // These attributes are not inherited by overriders.
1068      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1069      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1070        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1071    }
1072
1073    // 'const' and 'pure' attribute functions are also nounwind.
1074    if (TargetDecl->hasAttr<ConstAttr>()) {
1075      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1076      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1077    } else if (TargetDecl->hasAttr<PureAttr>()) {
1078      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1079      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1080    }
1081    if (TargetDecl->hasAttr<MallocAttr>())
1082      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1083  }
1084
1085  if (CodeGenOpts.OptimizeSize)
1086    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1087  if (CodeGenOpts.OptimizeSize == 2)
1088    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1089  if (CodeGenOpts.DisableRedZone)
1090    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1091  if (CodeGenOpts.NoImplicitFloat)
1092    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1093
1094  if (AttrOnCallSite) {
1095    // Attributes that should go on the call site only.
1096    if (!CodeGenOpts.SimplifyLibCalls)
1097      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1098  } else {
1099    // Attributes that should go on the function, but not the call site.
1100    if (!CodeGenOpts.DisableFPElim) {
1101      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1102    } else if (CodeGenOpts.OmitLeafFramePointer) {
1103      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1104      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1105    } else {
1106      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1107      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1108    }
1109
1110    FuncAttrs.addAttribute("less-precise-fpmad",
1111                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1112    FuncAttrs.addAttribute("no-infs-fp-math",
1113                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1114    FuncAttrs.addAttribute("no-nans-fp-math",
1115                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1116    FuncAttrs.addAttribute("unsafe-fp-math",
1117                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1118    FuncAttrs.addAttribute("use-soft-float",
1119                           llvm::toStringRef(CodeGenOpts.SoftFloat));
1120    FuncAttrs.addAttribute("stack-protector-buffer-size",
1121                           llvm::utostr(CodeGenOpts.SSPBufferSize));
1122
1123    if (!CodeGenOpts.StackRealignment)
1124      FuncAttrs.addAttribute("no-realign-stack");
1125  }
1126
1127  QualType RetTy = FI.getReturnType();
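  // LLVM attribute lists index the return value as 0 (ReturnIndex), the
  // function itself as ~0U (FunctionIndex), and the parameters from 1 upward,
  // which is why Index starts at 1.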
1128  unsigned Index = 1;
1129  const ABIArgInfo &RetAI = FI.getReturnInfo();
1130  switch (RetAI.getKind()) {
1131  case ABIArgInfo::Extend:
1132    if (RetTy->hasSignedIntegerRepresentation())
1133      RetAttrs.addAttribute(llvm::Attribute::SExt);
1134    else if (RetTy->hasUnsignedIntegerRepresentation())
1135      RetAttrs.addAttribute(llvm::Attribute::ZExt);
1136    // FALL THROUGH
1137  case ABIArgInfo::Direct:
1138    if (RetAI.getInReg())
1139      RetAttrs.addAttribute(llvm::Attribute::InReg);
1140    break;
1141  case ABIArgInfo::Ignore:
1142    break;
1143
1144  case ABIArgInfo::InAlloca: {
1145    // inalloca disables readnone and readonly
1146    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1147      .removeAttribute(llvm::Attribute::ReadNone);
1148    break;
1149  }
1150
1151  case ABIArgInfo::Indirect: {
1152    llvm::AttrBuilder SRETAttrs;
1153    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1154    if (RetAI.getInReg())
1155      SRETAttrs.addAttribute(llvm::Attribute::InReg);
1156    PAL.push_back(llvm::
1157                  AttributeSet::get(getLLVMContext(), Index, SRETAttrs));
1158
1159    ++Index;
1160    // sret disables readnone and readonly
1161    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1162      .removeAttribute(llvm::Attribute::ReadNone);
1163    break;
1164  }
1165
1166  case ABIArgInfo::Expand:
1167    llvm_unreachable("Invalid ABI kind for return argument");
1168  }
1169
1170  if (RetAttrs.hasAttributes())
1171    PAL.push_back(llvm::
1172                  AttributeSet::get(getLLVMContext(),
1173                                    llvm::AttributeSet::ReturnIndex,
1174                                    RetAttrs));
1175
1176  for (const auto &I : FI.arguments()) {
1177    QualType ParamType = I.type;
1178    const ABIArgInfo &AI = I.info;
1179    llvm::AttrBuilder Attrs;
1180
1181    if (AI.getPaddingType()) {
1182      if (AI.getPaddingInReg())
1183        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
1184                                              llvm::Attribute::InReg));
1185      // Increment Index if there is padding.
1186      ++Index;
1187    }
1188
1189    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1190    // have the corresponding parameter variable.  It doesn't make
1191    // sense to do it here because parameters are so messed up.
1192    switch (AI.getKind()) {
1193    case ABIArgInfo::Extend:
1194      if (ParamType->isSignedIntegerOrEnumerationType())
1195        Attrs.addAttribute(llvm::Attribute::SExt);
1196      else if (ParamType->isUnsignedIntegerOrEnumerationType())
1197        Attrs.addAttribute(llvm::Attribute::ZExt);
1198      // FALL THROUGH
1199    case ABIArgInfo::Direct:
1200      if (AI.getInReg())
1201        Attrs.addAttribute(llvm::Attribute::InReg);
1202
1203      // FIXME: handle sseregparm someday...
1204
1205      if (llvm::StructType *STy =
1206          dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1207        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
1208        if (Attrs.hasAttributes())
1209          for (unsigned I = 0; I < Extra; ++I)
1210            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
1211                                                  Attrs));
1212        Index += Extra;
1213      }
1214      break;
1215
1216    case ABIArgInfo::Indirect:
1217      if (AI.getInReg())
1218        Attrs.addAttribute(llvm::Attribute::InReg);
1219
1220      if (AI.getIndirectByVal())
1221        Attrs.addAttribute(llvm::Attribute::ByVal);
1222
1223      Attrs.addAlignmentAttr(AI.getIndirectAlign());
1224
1225      // byval disables readnone and readonly.
1226      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1227        .removeAttribute(llvm::Attribute::ReadNone);
1228      break;
1229
1230    case ABIArgInfo::Ignore:
1231      // Skip increment, no matching LLVM parameter.
1232      continue;
1233
1234    case ABIArgInfo::InAlloca:
1235      // inalloca disables readnone and readonly.
1236      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1237          .removeAttribute(llvm::Attribute::ReadNone);
1238      // Skip increment, no matching LLVM parameter.
1239      continue;
1240
1241    case ABIArgInfo::Expand: {
1242      SmallVector<llvm::Type*, 8> types;
1243      // FIXME: This is rather inefficient. Do we ever actually need to do
1244      // anything here? The result should be just reconstructed on the other
1245      // side, so extension should be a non-issue.
1246      getTypes().GetExpandedTypes(ParamType, types);
1247      Index += types.size();
1248      continue;
1249    }
1250    }
1251
1252    if (Attrs.hasAttributes())
1253      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1254    ++Index;
1255  }
1256
1257  // Add the inalloca attribute to the trailing inalloca parameter if present.
1258  if (FI.usesInAlloca()) {
1259    llvm::AttrBuilder Attrs;
1260    Attrs.addAttribute(llvm::Attribute::InAlloca);
1261    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1262  }
1263
1264  if (FuncAttrs.hasAttributes())
1265    PAL.push_back(llvm::
1266                  AttributeSet::get(getLLVMContext(),
1267                                    llvm::AttributeSet::FunctionIndex,
1268                                    FuncAttrs));
1269}
1270
1271/// An argument came in as a promoted argument; demote it back to its
1272/// declared type.
1273static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1274                                         const VarDecl *var,
1275                                         llvm::Value *value) {
1276  llvm::Type *varType = CGF.ConvertType(var->getType());
1277
1278  // This can happen with promotions that actually don't change the
1279  // underlying type, like the enum promotions.
1280  if (value->getType() == varType) return value;
1281
1282  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1283         && "unexpected promotion type");
1284
1285  if (isa<llvm::IntegerType>(varType))
1286    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1287
1288  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1289}
1290
1291void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1292                                         llvm::Function *Fn,
1293                                         const FunctionArgList &Args) {
1294  // If this is an implicit-return-zero function, go ahead and
1295  // initialize the return value.  TODO: it might be nice to have
1296  // a more general mechanism for this that didn't require synthesized
1297  // return statements.
1298  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1299    if (FD->hasImplicitReturnZero()) {
1300      QualType RetTy = FD->getReturnType().getUnqualifiedType();
1301      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1302      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1303      Builder.CreateStore(Zero, ReturnValue);
1304    }
1305  }
1306
1307  // FIXME: We no longer need the types from FunctionArgList; lift up and
1308  // simplify.
1309
1310  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1311  llvm::Function::arg_iterator AI = Fn->arg_begin();
1312
1313  // If we're using inalloca, all the memory arguments are GEPs off of the last
1314  // parameter, which is a pointer to the complete memory area.
1315  llvm::Value *ArgStruct = 0;
1316  if (FI.usesInAlloca()) {
1317    llvm::Function::arg_iterator EI = Fn->arg_end();
1318    --EI;
1319    ArgStruct = EI;
1320    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
1321  }
1322
1323  // Name the struct return argument.
1324  if (CGM.ReturnTypeUsesSRet(FI)) {
1325    AI->setName("agg.result");
1326    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1327                                        AI->getArgNo() + 1,
1328                                        llvm::Attribute::NoAlias));
1329    ++AI;
1330  }
1331
1332  // Track if we received the parameter as a pointer (indirect, byval, or
1333  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to copy it
1334  // into a local alloca for us.
1335  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
1336  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
1337  SmallVector<ValueAndIsPtr, 16> ArgVals;
1338  ArgVals.reserve(Args.size());
1339
1340  // Create a pointer value for every parameter declaration.  This usually
1341  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
1342  // any cleanups or do anything that might unwind.  We do that separately, so
1343  // we can push the cleanups in the correct order for the ABI.
1344  assert(FI.arg_size() == Args.size() &&
1345         "Mismatch between function signature & arguments.");
1346  unsigned ArgNo = 1;
1347  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1348  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1349       i != e; ++i, ++info_it, ++ArgNo) {
1350    const VarDecl *Arg = *i;
1351    QualType Ty = info_it->type;
1352    const ABIArgInfo &ArgI = info_it->info;
1353
1354    bool isPromoted =
1355      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1356
1357    // Skip the dummy padding argument.
1358    if (ArgI.getPaddingType())
1359      ++AI;
1360
1361    switch (ArgI.getKind()) {
1362    case ABIArgInfo::InAlloca: {
1363      llvm::Value *V = Builder.CreateStructGEP(
1364          ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
1365      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1366      continue;  // Don't increment AI!
1367    }
1368
1369    case ABIArgInfo::Indirect: {
1370      llvm::Value *V = AI;
1371
1372      if (!hasScalarEvaluationKind(Ty)) {
1373        // Aggregates and complex variables are accessed by reference.  All we
1374        // need to do is realign the value, if requested.
1375        if (ArgI.getIndirectRealign()) {
1376          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1377
1378          // Copy from the incoming argument pointer to the temporary with the
1379          // appropriate alignment.
1380          //
1381          // FIXME: We should have a common utility for generating an aggregate
1382          // copy.
1383          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1384          CharUnits Size = getContext().getTypeSizeInChars(Ty);
1385          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1386          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1387          Builder.CreateMemCpy(Dst,
1388                               Src,
1389                               llvm::ConstantInt::get(IntPtrTy,
1390                                                      Size.getQuantity()),
1391                               ArgI.getIndirectAlign(),
1392                               false);
1393          V = AlignedTemp;
1394        }
1395        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1396      } else {
1397        // Load scalar value from indirect argument.
1398        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1399        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
1400                             Arg->getLocStart());
1401
1402        if (isPromoted)
1403          V = emitArgumentDemotion(*this, Arg, V);
1404        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1405      }
1406      break;
1407    }
1408
1409    case ABIArgInfo::Extend:
1410    case ABIArgInfo::Direct: {
1411
1412      // If we have the trivial case, handle it with no muss and fuss.
1413      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1414          ArgI.getCoerceToType() == ConvertType(Ty) &&
1415          ArgI.getDirectOffset() == 0) {
1416        assert(AI != Fn->arg_end() && "Argument mismatch!");
1417        llvm::Value *V = AI;
1418
1419        if (Arg->getType().isRestrictQualified())
1420          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1421                                              AI->getArgNo() + 1,
1422                                              llvm::Attribute::NoAlias));
1423
1424        // Ensure the argument is the correct type.
1425        if (V->getType() != ArgI.getCoerceToType())
1426          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1427
1428        if (isPromoted)
1429          V = emitArgumentDemotion(*this, Arg, V);
1430
1431        if (const CXXMethodDecl *MD =
1432            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
1433          if (MD->isVirtual() && Arg == CXXABIThisDecl)
1434            V = CGM.getCXXABI().
1435                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
1436        }
1437
1438        // Because of merging of function types from multiple decls it is
1439        // possible for the type of an argument to not match the corresponding
1440        // type in the function type.  Since we are code-generating the
1441        // callee here, add a cast to the argument type.
1442        llvm::Type *LTy = ConvertType(Arg->getType());
1443        if (V->getType() != LTy)
1444          V = Builder.CreateBitCast(V, LTy);
1445
1446        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1447        break;
1448      }
1449
1450      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1451
1452      // The alignment we need to use is the max of the requested alignment for
1453      // the argument and the alignment required by our access code below.
1454      unsigned AlignmentToUse =
1455        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1456      AlignmentToUse = std::max(AlignmentToUse,
1457                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1458
1459      Alloca->setAlignment(AlignmentToUse);
1460      llvm::Value *V = Alloca;
1461      llvm::Value *Ptr = V;    // Pointer to store into.
1462
1463      // If the value is offset in memory, apply the offset now.
1464      if (unsigned Offs = ArgI.getDirectOffset()) {
1465        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1466        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1467        Ptr = Builder.CreateBitCast(Ptr,
1468                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1469      }
1470
1471      // If the coerce-to type is a first class aggregate, we flatten it and
1472      // pass the elements. Either way is semantically identical, but fast-isel
1473      // and the optimizer generally likes scalar values better than FCAs.
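      // Illustrative, target-dependent example: on x86-64 a parameter of type
      // 'struct { int a; double b; }' may be coerced to '{ i32, double }'; the
      // code below then receives two IR arguments, names them
      // "<arg>.coerce0" and "<arg>.coerce1", and stores each one into the
      // matching field of the coerced alloca.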
1474      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1475      if (STy && STy->getNumElements() > 1) {
1476        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1477        llvm::Type *DstTy =
1478          cast<llvm::PointerType>(Ptr->getType())->getElementType();
1479        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1480
1481        if (SrcSize <= DstSize) {
1482          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1483
1484          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1485            assert(AI != Fn->arg_end() && "Argument mismatch!");
1486            AI->setName(Arg->getName() + ".coerce" + Twine(i));
1487            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1488            Builder.CreateStore(AI++, EltPtr);
1489          }
1490        } else {
1491          llvm::AllocaInst *TempAlloca =
1492            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1493          TempAlloca->setAlignment(AlignmentToUse);
1494          llvm::Value *TempV = TempAlloca;
1495
1496          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1497            assert(AI != Fn->arg_end() && "Argument mismatch!");
1498            AI->setName(Arg->getName() + ".coerce" + Twine(i));
1499            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1500            Builder.CreateStore(AI++, EltPtr);
1501          }
1502
1503          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1504        }
1505      } else {
1506        // Simple case, just do a coerced store of the argument into the alloca.
1507        assert(AI != Fn->arg_end() && "Argument mismatch!");
1508        AI->setName(Arg->getName() + ".coerce");
1509        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1510      }
1511
1512
1513      // Match to what EmitParmDecl is expecting for this type.
1514      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1515        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
1516        if (isPromoted)
1517          V = emitArgumentDemotion(*this, Arg, V);
1518        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1519      } else {
1520        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1521      }
1522      continue;  // Skip ++AI increment, already done.
1523    }
1524
1525    case ABIArgInfo::Expand: {
1526      // If this structure was expanded into multiple arguments then
1527      // we need to create a temporary and reconstruct it from the
1528      // arguments.
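      // Hedged example (ABI- and type-dependent): a parameter 'Pt p' with
      // 'struct Pt { float x, y; };' might be expanded into two float IR
      // arguments; ExpandTypeFromArgs reassembles them into the temporary
      // below, and the trailing loop simply names them "p.0" and "p.1".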
1529      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1530      CharUnits Align = getContext().getDeclAlign(Arg);
1531      Alloca->setAlignment(Align.getQuantity());
1532      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1533      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1534      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
1535
1536      // Name the arguments used in expansion and increment AI.
1537      unsigned Index = 0;
1538      for (; AI != End; ++AI, ++Index)
1539        AI->setName(Arg->getName() + "." + Twine(Index));
1540      continue;
1541    }
1542
1543    case ABIArgInfo::Ignore:
1544      // Initialize the local variable appropriately.
1545      if (!hasScalarEvaluationKind(Ty)) {
1546        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
1547      } else {
1548        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
1549        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
1550      }
1551
1552      // Skip increment, no matching LLVM parameter.
1553      continue;
1554    }
1555
1556    ++AI;
1557  }
1558
1559  if (FI.usesInAlloca())
1560    ++AI;
1561  assert(AI == Fn->arg_end() && "Argument mismatch!");
1562
1563  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
1564    for (int I = Args.size() - 1; I >= 0; --I)
1565      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
1566                   I + 1);
1567  } else {
1568    for (unsigned I = 0, E = Args.size(); I != E; ++I)
1569      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
1570                   I + 1);
1571  }
1572}
1573
1574static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1575  while (insn->use_empty()) {
1576    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1577    if (!bitcast) return;
1578
1579    // This is "safe" because we would have used a ConstantExpr otherwise.
1580    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1581    bitcast->eraseFromParent();
1582  }
1583}
1584
1585/// Try to emit a fused autorelease of a return result.
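/// For illustration (IR abbreviated): if the value about to be autoreleased
/// was just produced by
///   %v = call i8* @objc_retain(i8* %x)
/// the retain and the pending autorelease are fused into a single
/// objc_retainAutoreleaseReturnValue call; if it came from
/// objc_retainAutoreleasedReturnValue, the retain/autorelease pair cancels
/// and %x is returned directly.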
1586static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1587                                                    llvm::Value *result) {
1588  // We must be immediately following the cast.
1589  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1590  if (BB->empty()) return 0;
1591  if (&BB->back() != result) return 0;
1592
1593  llvm::Type *resultType = result->getType();
1594
1595  // result is in a BasicBlock and is therefore an Instruction.
1596  llvm::Instruction *generator = cast<llvm::Instruction>(result);
1597
1598  SmallVector<llvm::Instruction*,4> insnsToKill;
1599
1600  // Look for:
1601  //  %generator = bitcast %type1* %generator2 to %type2*
1602  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1603    // We would have emitted this as a constant if the operand weren't
1604    // an Instruction.
1605    generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1606
1607    // Require the generator to be immediately followed by the cast.
1608    if (generator->getNextNode() != bitcast)
1609      return 0;
1610
1611    insnsToKill.push_back(bitcast);
1612  }
1613
1614  // Look for:
1615  //   %generator = call i8* @objc_retain(i8* %originalResult)
1616  // or
1617  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1618  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1619  if (!call) return 0;
1620
1621  bool doRetainAutorelease;
1622
1623  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1624    doRetainAutorelease = true;
1625  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1626                                          .objc_retainAutoreleasedReturnValue) {
1627    doRetainAutorelease = false;
1628
1629    // If we emitted an assembly marker for this call (and the
1630    // ARCEntrypoints field should have been set if so), go looking
1631    // for that call.  If we can't find it, we can't do this
1632    // optimization.  But it should always be the immediately previous
1633    // instruction, unless we needed bitcasts around the call.
1634    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1635      llvm::Instruction *prev = call->getPrevNode();
1636      assert(prev);
1637      if (isa<llvm::BitCastInst>(prev)) {
1638        prev = prev->getPrevNode();
1639        assert(prev);
1640      }
1641      assert(isa<llvm::CallInst>(prev));
1642      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1643               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1644      insnsToKill.push_back(prev);
1645    }
1646  } else {
1647    return 0;
1648  }
1649
1650  result = call->getArgOperand(0);
1651  insnsToKill.push_back(call);
1652
1653  // Keep killing bitcasts, for sanity.  Note that we no longer care
1654  // about precise ordering as long as there's exactly one use.
1655  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1656    if (!bitcast->hasOneUse()) break;
1657    insnsToKill.push_back(bitcast);
1658    result = bitcast->getOperand(0);
1659  }
1660
1661  // Delete all the unnecessary instructions, from latest to earliest.
1662  for (SmallVectorImpl<llvm::Instruction*>::iterator
1663         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1664    (*i)->eraseFromParent();
1665
1666  // Do the fused retain/autorelease if we were asked to.
1667  if (doRetainAutorelease)
1668    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1669
1670  // Cast back to the result type.
1671  return CGF.Builder.CreateBitCast(result, resultType);
1672}
1673
1674/// If this is a +1 of the value of an immutable 'self', remove it.
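/// Illustrative case: for a method that simply does 'return self;' (where
/// 'self' is implicitly const under ARC), the result would otherwise be
/// retained and then autoreleased; since 'self' cannot change during the
/// method, the retain is erased and the loaded 'self' is returned directly.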
1675static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1676                                          llvm::Value *result) {
1677  // This is only applicable to a method with an immutable 'self'.
1678  const ObjCMethodDecl *method =
1679    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1680  if (!method) return 0;
1681  const VarDecl *self = method->getSelfDecl();
1682  if (!self->getType().isConstQualified()) return 0;
1683
1684  // Look for a retain call.
1685  llvm::CallInst *retainCall =
1686    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1687  if (!retainCall ||
1688      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1689    return 0;
1690
1691  // Look for an ordinary load of 'self'.
1692  llvm::Value *retainedValue = retainCall->getArgOperand(0);
1693  llvm::LoadInst *load =
1694    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1695  if (!load || load->isAtomic() || load->isVolatile() ||
1696      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1697    return 0;
1698
1699  // Okay!  Burn it all down.  This relies for correctness on the
1700  // assumption that the retain is emitted as part of the return and
1701  // that thereafter everything is used "linearly".
1702  llvm::Type *resultType = result->getType();
1703  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1704  assert(retainCall->use_empty());
1705  retainCall->eraseFromParent();
1706  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1707
1708  return CGF.Builder.CreateBitCast(load, resultType);
1709}
1710
1711/// Emit an ARC autorelease of the result of a function.
1712///
1713/// \return the value to actually return from the function
1714static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1715                                            llvm::Value *result) {
1716  // If we're returning 'self', kill the initial retain.  This is a
1717  // heuristic attempt to "encourage correctness" in the really unfortunate
1718  // case where we have a return of self during a dealloc and we desperately
1719  // need to avoid the possible autorelease.
1720  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1721    return self;
1722
1723  // At -O0, try to emit a fused retain/autorelease.
1724  if (CGF.shouldUseFusedARCCalls())
1725    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1726      return fused;
1727
1728  return CGF.EmitARCAutoreleaseReturnValue(result);
1729}
1730
1731/// Heuristically search for a dominating store to the return-value slot.
1732static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1733  // If there are multiple uses of the return-value slot, just check
1734  // for something immediately preceding the IP.  Sometimes this can
1735  // happen with how we generate implicit-returns; it can also happen
1736  // with noreturn cleanups.
1737  if (!CGF.ReturnValue->hasOneUse()) {
1738    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1739    if (IP->empty()) return 0;
1740    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1741    if (!store) return 0;
1742    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1743    assert(!store->isAtomic() && !store->isVolatile()); // see below
1744    return store;
1745  }
1746
1747  llvm::StoreInst *store =
1748    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
1749  if (!store) return 0;
1750
1751  // These aren't actually possible for non-coerced returns, and we
1752  // only care about non-coerced returns on this code path.
1753  assert(!store->isAtomic() && !store->isVolatile());
1754
1755  // Now do a quick-and-dirty dominance check: just walk up the
1756  // single-predecessors chain from the current insertion point.
1757  llvm::BasicBlock *StoreBB = store->getParent();
1758  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1759  while (IP != StoreBB) {
1760    if (!(IP = IP->getSinglePredecessor()))
1761      return 0;
1762  }
1763
1764  // Okay, the store's basic block dominates the insertion point; we
1765  // can do our thing.
1766  return store;
1767}
1768
1769void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1770                                         bool EmitRetDbgLoc,
1771                                         SourceLocation EndLoc) {
1772  // Functions with no result always return void.
1773  if (ReturnValue == 0) {
1774    Builder.CreateRetVoid();
1775    return;
1776  }
1777
1778  llvm::DebugLoc RetDbgLoc;
1779  llvm::Value *RV = 0;
1780  QualType RetTy = FI.getReturnType();
1781  const ABIArgInfo &RetAI = FI.getReturnInfo();
1782
1783  switch (RetAI.getKind()) {
1784  case ABIArgInfo::InAlloca:
1785    // Aggregates get evaluated directly into the destination.  Sometimes we
1786    // need to return the sret value in a register, though.
1787    assert(hasAggregateEvaluationKind(RetTy));
1788    if (RetAI.getInAllocaSRet()) {
1789      llvm::Function::arg_iterator EI = CurFn->arg_end();
1790      --EI;
1791      llvm::Value *ArgStruct = EI;
1792      llvm::Value *SRet =
1793          Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
1794      RV = Builder.CreateLoad(SRet, "sret");
1795    }
1796    break;
1797
1798  case ABIArgInfo::Indirect: {
1799    switch (getEvaluationKind(RetTy)) {
1800    case TEK_Complex: {
1801      ComplexPairTy RT =
1802        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
1803                          EndLoc);
1804      EmitStoreOfComplex(RT,
1805                       MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1806                         /*isInit*/ true);
1807      break;
1808    }
1809    case TEK_Aggregate:
1810      // Do nothing; aggregrates get evaluated directly into the destination.
1811      break;
1812    case TEK_Scalar:
1813      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1814                        MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1815                        /*isInit*/ true);
1816      break;
1817    }
1818    break;
1819  }
1820
1821  case ABIArgInfo::Extend:
1822  case ABIArgInfo::Direct:
1823    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1824        RetAI.getDirectOffset() == 0) {
1825      // The internal return value temp will always have pointer-to-return-type
1826      // type; just do a load.
1827
1828      // If there is a dominating store to ReturnValue, we can elide
1829      // the load, zap the store, and usually zap the alloca.
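      // A minimal sketch of the rewrite (IR names illustrative):
      //   store i32 %x, i32* %retval     ; dominating store found below
      //   %v = load i32* %retval         ; load elided
      //   ret i32 %v
      // becomes just 'ret i32 %x', and %retval's alloca is removed if that
      // store was its only remaining use.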
1830      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1831        // Reuse the debug location from the store unless there is
1832        // cleanup code to be emitted between the store and return
1833        // instruction.
1834        if (EmitRetDbgLoc && !AutoreleaseResult)
1835          RetDbgLoc = SI->getDebugLoc();
1836        // Get the stored value and nuke the now-dead store.
1837        RV = SI->getValueOperand();
1838        SI->eraseFromParent();
1839
1840        // If that was the only use of the return value, nuke it as well now.
1841        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1842          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1843          ReturnValue = 0;
1844        }
1845
1846      // Otherwise, we have to do a simple load.
1847      } else {
1848        RV = Builder.CreateLoad(ReturnValue);
1849      }
1850    } else {
1851      llvm::Value *V = ReturnValue;
1852      // If the value is offset in memory, apply the offset now.
1853      if (unsigned Offs = RetAI.getDirectOffset()) {
1854        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1855        V = Builder.CreateConstGEP1_32(V, Offs);
1856        V = Builder.CreateBitCast(V,
1857                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1858      }
1859
1860      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1861    }
1862
1863    // In ARC, end functions that return a retainable type with a call
1864    // to objc_autoreleaseReturnValue.
1865    if (AutoreleaseResult) {
1866      assert(getLangOpts().ObjCAutoRefCount &&
1867             !FI.isReturnsRetained() &&
1868             RetTy->isObjCRetainableType());
1869      RV = emitAutoreleaseOfResult(*this, RV);
1870    }
1871
1872    break;
1873
1874  case ABIArgInfo::Ignore:
1875    break;
1876
1877  case ABIArgInfo::Expand:
1878    llvm_unreachable("Invalid ABI kind for return argument");
1879  }
1880
1881  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1882  if (!RetDbgLoc.isUnknown())
1883    Ret->setDebugLoc(RetDbgLoc);
1884}
1885
1886static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
1887  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
1888  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
1889}
1890
1891static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
1892  // FIXME: Generate IR in one pass, rather than going back and fixing up these
1893  // placeholders.
1894  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
1895  llvm::Value *Placeholder =
1896      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
1897  Placeholder = CGF.Builder.CreateLoad(Placeholder);
1898  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
1899                               Ty.getQualifiers(),
1900                               AggValueSlot::IsNotDestructed,
1901                               AggValueSlot::DoesNotNeedGCBarriers,
1902                               AggValueSlot::IsNotAliased);
1903}
1904
1905void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1906                                          const VarDecl *param,
1907                                          SourceLocation loc) {
1908  // StartFunction converted the ABI-lowered parameter(s) into a
1909  // local alloca.  We need to turn that into an r-value suitable
1910  // for EmitCall.
1911  llvm::Value *local = GetAddrOfLocalVar(param);
1912
1913  QualType type = param->getType();
1914
1915  // For the most part, we just need to load the alloca, except:
1916  // 1) aggregate r-values are actually pointers to temporaries, and
1917  // 2) references to non-scalars are pointers directly to the aggregate.
1918  // I don't know why references to scalars are different here.
1919  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1920    if (!hasScalarEvaluationKind(ref->getPointeeType()))
1921      return args.add(RValue::getAggregate(local), type);
1922
1923    // Locals which are references to scalars are represented
1924    // with allocas holding the pointer.
1925    return args.add(RValue::get(Builder.CreateLoad(local)), type);
1926  }
1927
1928  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
1929    AggValueSlot Slot = createPlaceholderSlot(*this, type);
1930    Slot.setExternallyDestructed();
1931
1932    // FIXME: Either emit a copy constructor call, or figure out how to do
1933    // guaranteed tail calls with perfect forwarding in LLVM.
1934    CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
1935    EmitNullInitialization(Slot.getAddr(), type);
1936
1937    RValue RV = Slot.asRValue();
1938    args.add(RV, type);
1939    return;
1940  }
1941
1942  args.add(convertTempToRValue(local, type, loc), type);
1943}
1944
1945static bool isProvablyNull(llvm::Value *addr) {
1946  return isa<llvm::ConstantPointerNull>(addr);
1947}
1948
1949static bool isProvablyNonNull(llvm::Value *addr) {
1950  return isa<llvm::AllocaInst>(addr);
1951}
1952
1953/// Emit the actual writing-back of a writeback.
1954static void emitWriteback(CodeGenFunction &CGF,
1955                          const CallArgList::Writeback &writeback) {
1956  const LValue &srcLV = writeback.Source;
1957  llvm::Value *srcAddr = srcLV.getAddress();
1958  assert(!isProvablyNull(srcAddr) &&
1959         "shouldn't have writeback for provably null argument");
1960
1961  llvm::BasicBlock *contBB = 0;
1962
1963  // If the argument wasn't provably non-null, we need to null check
1964  // before doing the store.
1965  bool provablyNonNull = isProvablyNonNull(srcAddr);
1966  if (!provablyNonNull) {
1967    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1968    contBB = CGF.createBasicBlock("icr.done");
1969
1970    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1971    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1972    CGF.EmitBlock(writebackBB);
1973  }
1974
1975  // Load the value to writeback.
1976  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1977
1978  // Cast it back, in case we're writing an id to a Foo* or something.
1979  value = CGF.Builder.CreateBitCast(value,
1980               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1981                            "icr.writeback-cast");
1982
1983  // Perform the writeback.
1984
1985  // If we have a "to use" value, it's something we need to emit a use
1986  // of.  This has to be carefully threaded in: if it's done after the
1987  // release it's potentially undefined behavior (and the optimizer
1988  // will ignore it), and if it happens before the retain then the
1989  // optimizer could move the release there.
1990  if (writeback.ToUse) {
1991    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
1992
1993    // Retain the new value.  No need to block-copy here:  the block's
1994    // being passed up the stack.
1995    value = CGF.EmitARCRetainNonBlock(value);
1996
1997    // Emit the intrinsic use here.
1998    CGF.EmitARCIntrinsicUse(writeback.ToUse);
1999
2000    // Load the old value (primitively).
2001    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2002
2003    // Put the new value in place (primitively).
2004    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2005
2006    // Release the old value.
2007    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2008
2009  // Otherwise, we can just do a normal lvalue store.
2010  } else {
2011    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2012  }
2013
2014  // Jump to the continuation block.
2015  if (!provablyNonNull)
2016    CGF.EmitBlock(contBB);
2017}
2018
2019static void emitWritebacks(CodeGenFunction &CGF,
2020                           const CallArgList &args) {
2021  for (const auto &I : args.writebacks())
2022    emitWriteback(CGF, I);
2023}
2024
2025static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2026                                            const CallArgList &CallArgs) {
2027  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2028  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2029    CallArgs.getCleanupsToDeactivate();
2030  // Iterate in reverse to increase the likelihood of popping the cleanup.
2031  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
2032         I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
2033    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
2034    I->IsActiveIP->eraseFromParent();
2035  }
2036}
2037
2038static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2039  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2040    if (uop->getOpcode() == UO_AddrOf)
2041      return uop->getSubExpr();
2042  return 0;
2043}
2044
2045/// Emit an argument that's being passed call-by-writeback.  That is,
2046/// we are passing the address of a temporary that is written back afterwards.
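/// A hedged example (ARC out-parameter; names illustrative):
///   NSError *err;          // __strong
///   [obj doThing:&err];    // parameter is 'NSError *__autoreleasing *'
/// is emitted roughly as: allocate an __autoreleasing temporary, pass its
/// address to the callee, and afterwards copy the temporary's value back into
/// 'err' (guarded by a null check when the original address isn't provably
/// non-null).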
2047static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2048                             const ObjCIndirectCopyRestoreExpr *CRE) {
2049  LValue srcLV;
2050
2051  // Make an optimistic effort to emit the address as an l-value.
2052  // This can fail if the argument expression is more complicated.
2053  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2054    srcLV = CGF.EmitLValue(lvExpr);
2055
2056  // Otherwise, just emit it as a scalar.
2057  } else {
2058    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
2059
2060    QualType srcAddrType =
2061      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2062    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
2063  }
2064  llvm::Value *srcAddr = srcLV.getAddress();
2065
2066  // The dest and src types don't necessarily match in LLVM terms
2067  // because of the crazy ObjC compatibility rules.
2068
2069  llvm::PointerType *destType =
2070    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2071
2072  // If the address is a constant null, just pass the appropriate null.
2073  if (isProvablyNull(srcAddr)) {
2074    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2075             CRE->getType());
2076    return;
2077  }
2078
2079  // Create the temporary.
2080  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
2081                                           "icr.temp");
2082  // Loading an l-value can introduce a cleanup if the l-value is __weak,
2083  // and that cleanup will be conditional if we can't prove that the l-value
2084  // isn't null, so we need to register a dominating point so that the cleanups
2085  // system will make valid IR.
2086  CodeGenFunction::ConditionalEvaluation condEval(CGF);
2087
2088  // Zero-initialize it if we're not doing a copy-initialization.
2089  bool shouldCopy = CRE->shouldCopy();
2090  if (!shouldCopy) {
2091    llvm::Value *null =
2092      llvm::ConstantPointerNull::get(
2093        cast<llvm::PointerType>(destType->getElementType()));
2094    CGF.Builder.CreateStore(null, temp);
2095  }
2096
2097  llvm::BasicBlock *contBB = 0;
2098  llvm::BasicBlock *originBB = 0;
2099
2100  // If the address is *not* known to be non-null, we need to switch.
2101  llvm::Value *finalArgument;
2102
2103  bool provablyNonNull = isProvablyNonNull(srcAddr);
2104  if (provablyNonNull) {
2105    finalArgument = temp;
2106  } else {
2107    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2108
2109    finalArgument = CGF.Builder.CreateSelect(isNull,
2110                                   llvm::ConstantPointerNull::get(destType),
2111                                             temp, "icr.argument");
2112
2113    // If we need to copy, then the load has to be conditional, which
2114    // means we need control flow.
2115    if (shouldCopy) {
2116      originBB = CGF.Builder.GetInsertBlock();
2117      contBB = CGF.createBasicBlock("icr.cont");
2118      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2119      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2120      CGF.EmitBlock(copyBB);
2121      condEval.begin(CGF);
2122    }
2123  }
2124
2125  llvm::Value *valueToUse = 0;
2126
2127  // Perform a copy if necessary.
2128  if (shouldCopy) {
2129    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2130    assert(srcRV.isScalar());
2131
2132    llvm::Value *src = srcRV.getScalarVal();
2133    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2134                                    "icr.cast");
2135
2136    // Use an ordinary store, not a store-to-lvalue.
2137    CGF.Builder.CreateStore(src, temp);
2138
2139    // If optimization is enabled, and the value was held in a
2140    // __strong variable, we need to tell the optimizer that this
2141    // value has to stay alive until we're doing the store back.
2142    // This is because the temporary is effectively unretained,
2143    // and so otherwise we can violate the high-level semantics.
2144    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2145        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2146      valueToUse = src;
2147    }
2148  }
2149
2150  // Finish the control flow if we needed it.
2151  if (shouldCopy && !provablyNonNull) {
2152    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2153    CGF.EmitBlock(contBB);
2154
2155    // Make a phi for the value to intrinsically use.
2156    if (valueToUse) {
2157      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2158                                                      "icr.to-use");
2159      phiToUse->addIncoming(valueToUse, copyBB);
2160      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2161                            originBB);
2162      valueToUse = phiToUse;
2163    }
2164
2165    condEval.end(CGF);
2166  }
2167
2168  args.addWriteback(srcLV, temp, valueToUse);
2169  args.add(RValue::get(finalArgument), CRE->getType());
2170}
2171
2172void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2173  assert(!StackBase && !StackCleanup.isValid());
2174
2175  // Save the stack.
2176  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2177  StackBase = CGF.Builder.CreateCall(F, "inalloca.save");
2178
2179  // Control gets really tied up in landing pads, so we have to spill the
2180  // stacksave to an alloca to avoid violating SSA form.
2181  // TODO: This is dead if we never emit the cleanup.  We should create the
2182  // alloca and store lazily on the first cleanup emission.
2183  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
2184  CGF.Builder.CreateStore(StackBase, StackBaseMem);
2185  CGF.pushStackRestore(EHCleanup, StackBaseMem);
2186  StackCleanup = CGF.EHStack.getInnermostEHScope();
2187  assert(StackCleanup.isValid());
2188}
2189
2190void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2191  if (StackBase) {
2192    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
2193    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2194    // We could load StackBase from StackBaseMem, but in the non-exceptional
2195    // case we can skip it.
2196    CGF.Builder.CreateCall(F, StackBase);
2197  }
2198}
2199
2200void CodeGenFunction::EmitCallArgs(CallArgList &Args,
2201                                   ArrayRef<QualType> ArgTypes,
2202                                   CallExpr::const_arg_iterator ArgBeg,
2203                                   CallExpr::const_arg_iterator ArgEnd,
2204                                   bool ForceColumnInfo) {
2205  CGDebugInfo *DI = getDebugInfo();
2206  SourceLocation CallLoc;
2207  if (DI) CallLoc = DI->getLocation();
2208
2209  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2210  // because arguments are destroyed left to right in the callee.
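  // Sketch of the intent (hypothetical call): for 'f(a(), b())' this block
  // evaluates b() first and a() second, then reverses the freshly added
  // CallArgs so they line up with the IR function signature again (see the
  // std::reverse below).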
2211  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2212    // Insert a stack save if we're going to need any inalloca args.
2213    bool HasInAllocaArgs = false;
2214    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2215         I != E && !HasInAllocaArgs; ++I)
2216      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2217    if (HasInAllocaArgs) {
2218      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2219      Args.allocateArgumentMemory(*this);
2220    }
2221
2222    // Evaluate each argument.
2223    size_t CallArgsStart = Args.size();
2224    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2225      CallExpr::const_arg_iterator Arg = ArgBeg + I;
2226      EmitCallArg(Args, *Arg, ArgTypes[I]);
2227      // Restore the debug location.
2228      if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
2229    }
2230
2231    // Un-reverse the arguments we just evaluated so they match up with the LLVM
2232    // IR function.
2233    std::reverse(Args.begin() + CallArgsStart, Args.end());
2234    return;
2235  }
2236
2237  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2238    CallExpr::const_arg_iterator Arg = ArgBeg + I;
2239    assert(Arg != ArgEnd);
2240    EmitCallArg(Args, *Arg, ArgTypes[I]);
2241    // Restore the debug location.
2242    if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
2243  }
2244}
2245
2246namespace {
2247
2248struct DestroyUnpassedArg : EHScopeStack::Cleanup {
2249  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
2250      : Addr(Addr), Ty(Ty) {}
2251
2252  llvm::Value *Addr;
2253  QualType Ty;
2254
2255  void Emit(CodeGenFunction &CGF, Flags flags) override {
2256    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2257    assert(!Dtor->isTrivial());
2258    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2259                              /*Delegating=*/false, Addr);
2260  }
2261};
2262
2263}
2264
2265void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2266                                  QualType type) {
2267  if (const ObjCIndirectCopyRestoreExpr *CRE
2268        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2269    assert(getLangOpts().ObjCAutoRefCount);
2270    assert(getContext().hasSameType(E->getType(), type));
2271    return emitWritebackArg(*this, args, CRE);
2272  }
2273
2274  assert(type->isReferenceType() == E->isGLValue() &&
2275         "reference binding to unmaterialized r-value!");
2276
2277  if (E->isGLValue()) {
2278    assert(E->getObjectKind() == OK_Ordinary);
2279    return args.add(EmitReferenceBindingToExpr(E), type);
2280  }
2281
2282  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2283
2284  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2285  // However, we still have to push an EH-only cleanup in case we unwind before
2286  // we make it to the call.
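  // Hedged example (names hypothetical): in 'g(HasDtor(), other())' the
  // HasDtor temporary is constructed directly into its argument slot; if
  // evaluating another argument unwinds before the call instruction is
  // reached, the callee never runs, so this EH-only cleanup destroys the
  // already-constructed argument instead.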
2287  if (HasAggregateEvalKind && args.isUsingInAlloca()) {
2288    assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2289    AggValueSlot Slot = createPlaceholderSlot(*this, type);
2290    Slot.setExternallyDestructed();
2291    EmitAggExpr(E, Slot);
2292    RValue RV = Slot.asRValue();
2293    args.add(RV, type);
2294
2295    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2296    if (RD->hasNonTrivialDestructor()) {
2297      // Push an EH-only cleanup to destroy the argument if we unwind before
2298      // reaching the call.  The 'unreachable' emitted below serves as a marker
2299      // of the first instruction where the cleanup is active.
2300      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
2301      // This unreachable is a temporary marker which will be removed later.
2302      llvm::Instruction *IsActive = Builder.CreateUnreachable();
2303      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2304    }
2305    return;
2306  }
2307
2308  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2309      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2310    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2311    assert(L.isSimple());
2312    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2313      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2314    } else {
2315      // We can't represent a misaligned lvalue in the CallArgList, so copy
2316      // to an aligned temporary now.
2317      llvm::Value *tmp = CreateMemTemp(type);
2318      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2319                        L.getAlignment());
2320      args.add(RValue::getAggregate(tmp), type);
2321    }
2322    return;
2323  }
2324
2325  args.add(EmitAnyExprToTemp(E), type);
2326}
2327
2328// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2329// optimizer it can aggressively ignore unwind edges.
2330void
2331CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
2332  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2333      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
2334    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
2335                      CGM.getNoObjCARCExceptionsMetadata());
2336}
2337
2338/// Emits a call to the given no-arguments nounwind runtime function.
2339llvm::CallInst *
2340CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2341                                         const llvm::Twine &name) {
2342  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2343}
2344
2345/// Emits a call to the given nounwind runtime function.
2346llvm::CallInst *
2347CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2348                                         ArrayRef<llvm::Value*> args,
2349                                         const llvm::Twine &name) {
2350  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
2351  call->setDoesNotThrow();
2352  return call;
2353}
2354
2355/// Emits a simple call (never an invoke) to the given no-arguments
2356/// runtime function.
2357llvm::CallInst *
2358CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2359                                 const llvm::Twine &name) {
2360  return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2361}
2362
2363/// Emits a simple call (never an invoke) to the given runtime
2364/// function.
2365llvm::CallInst *
2366CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2367                                 ArrayRef<llvm::Value*> args,
2368                                 const llvm::Twine &name) {
2369  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
2370  call->setCallingConv(getRuntimeCC());
2371  return call;
2372}
2373
2374/// Emits a call or invoke to the given noreturn runtime function.
2375void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
2376                                               ArrayRef<llvm::Value*> args) {
2377  if (getInvokeDest()) {
2378    llvm::InvokeInst *invoke =
2379      Builder.CreateInvoke(callee,
2380                           getUnreachableBlock(),
2381                           getInvokeDest(),
2382                           args);
2383    invoke->setDoesNotReturn();
2384    invoke->setCallingConv(getRuntimeCC());
2385  } else {
2386    llvm::CallInst *call = Builder.CreateCall(callee, args);
2387    call->setDoesNotReturn();
2388    call->setCallingConv(getRuntimeCC());
2389    Builder.CreateUnreachable();
2390  }
2391  PGO.setCurrentRegionUnreachable();
2392}
2393
2394/// Emits a call or invoke instruction to the given nullary runtime
2395/// function.
2396llvm::CallSite
2397CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2398                                         const Twine &name) {
2399  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
2400}
2401
2402/// Emits a call or invoke instruction to the given runtime function.
2403llvm::CallSite
2404CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2405                                         ArrayRef<llvm::Value*> args,
2406                                         const Twine &name) {
2407  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
2408  callSite.setCallingConv(getRuntimeCC());
2409  return callSite;
2410}
2411
2412llvm::CallSite
2413CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2414                                  const Twine &Name) {
2415  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
2416}
2417
2418/// Emits a call or invoke instruction to the given function, depending
2419/// on the current state of the EH stack.
2420llvm::CallSite
2421CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2422                                  ArrayRef<llvm::Value *> Args,
2423                                  const Twine &Name) {
2424  llvm::BasicBlock *InvokeDest = getInvokeDest();
2425
2426  llvm::Instruction *Inst;
2427  if (!InvokeDest)
2428    Inst = Builder.CreateCall(Callee, Args, Name);
2429  else {
2430    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
2431    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
2432    EmitBlock(ContBB);
2433  }
2434
2435  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2436  // optimizer it can aggressively ignore unwind edges.
2437  if (CGM.getLangOpts().ObjCAutoRefCount)
2438    AddObjCARCExceptionMetadata(Inst);
2439
2440  return Inst;
2441}
2442
2443static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
2444                            llvm::FunctionType *FTy) {
2445  if (ArgNo < FTy->getNumParams())
2446    assert(Elt->getType() == FTy->getParamType(ArgNo));
2447  else
2448    assert(FTy->isVarArg());
2449  ++ArgNo;
2450}
2451
2452void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
2453                                       SmallVectorImpl<llvm::Value *> &Args,
2454                                       llvm::FunctionType *IRFuncTy) {
2455  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2456    unsigned NumElts = AT->getSize().getZExtValue();
2457    QualType EltTy = AT->getElementType();
2458    llvm::Value *Addr = RV.getAggregateAddr();
2459    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2460      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
2461      RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
2462      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
2463    }
2464  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2465    RecordDecl *RD = RT->getDecl();
2466    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
2467    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
2468
2469    if (RD->isUnion()) {
2470      const FieldDecl *LargestFD = 0;
2471      CharUnits UnionSize = CharUnits::Zero();
2472
2473      for (const auto *FD : RD->fields()) {
2474        assert(!FD->isBitField() &&
2475               "Cannot expand structure with bit-field members.");
2476        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
2477        if (UnionSize < FieldSize) {
2478          UnionSize = FieldSize;
2479          LargestFD = FD;
2480        }
2481      }
2482      if (LargestFD) {
2483        RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
2484        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
2485      }
2486    } else {
2487      for (const auto *FD : RD->fields()) {
2488        RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
2489        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
2490      }
2491    }
2492  } else if (Ty->isAnyComplexType()) {
2493    ComplexPairTy CV = RV.getComplexVal();
2494    Args.push_back(CV.first);
2495    Args.push_back(CV.second);
2496  } else {
2497    assert(RV.isScalar() &&
2498           "Unexpected non-scalar rvalue during struct expansion.");
2499
2500    // Insert a bitcast as needed.
2501    llvm::Value *V = RV.getScalarVal();
2502    if (Args.size() < IRFuncTy->getNumParams() &&
2503        V->getType() != IRFuncTy->getParamType(Args.size()))
2504      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
2505
2506    Args.push_back(V);
2507  }
2508}
2509
2510/// \brief Store a non-aggregate value to an address to initialize it.  For
2511/// initialization, a non-atomic store will be used.
2512static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
2513                                        LValue Dst) {
2514  if (Src.isScalar())
2515    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
2516  else
2517    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
2518}
2519
2520void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
2521                                                  llvm::Value *New) {
2522  DeferredReplacements.push_back(std::make_pair(Old, New));
2523}
2524
2525RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2526                                 llvm::Value *Callee,
2527                                 ReturnValueSlot ReturnValue,
2528                                 const CallArgList &CallArgs,
2529                                 const Decl *TargetDecl,
2530                                 llvm::Instruction **callOrInvoke) {
2531  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2532  SmallVector<llvm::Value*, 16> Args;
2533
2534  // Handle struct-return functions by passing a pointer to the
2535  // location that we would like to return into.
2536  QualType RetTy = CallInfo.getReturnType();
2537  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2538
2539  // IRArgNo - Keep track of the argument number in the callee we're looking at.
2540  unsigned IRArgNo = 0;
2541  llvm::FunctionType *IRFuncTy =
2542    cast<llvm::FunctionType>(
2543                  cast<llvm::PointerType>(Callee->getType())->getElementType());
2544
2545  // If we're using inalloca, insert the allocation after the stack save.
2546  // FIXME: Do this earlier rather than hacking it in here!
2547  llvm::Value *ArgMemory = 0;
2548  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
2549    llvm::AllocaInst *AI = new llvm::AllocaInst(
2550        ArgStruct, "argmem", CallArgs.getStackBase()->getNextNode());
2551    AI->setUsedWithInAlloca(true);
2552    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
2553    ArgMemory = AI;
2554  }
2555
2556  // If the call returns a temporary with struct return, create a temporary
2557  // alloca to hold the result, unless one is given to us.
2558  llvm::Value *SRetPtr = 0;
2559  if (CGM.ReturnTypeUsesSRet(CallInfo) || RetAI.isInAlloca()) {
2560    SRetPtr = ReturnValue.getValue();
2561    if (!SRetPtr)
2562      SRetPtr = CreateMemTemp(RetTy);
2563    if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2564      Args.push_back(SRetPtr);
2565      checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
2566    } else {
2567      llvm::Value *Addr =
2568          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
2569      Builder.CreateStore(SRetPtr, Addr);
2570    }
2571  }
2572
2573  assert(CallInfo.arg_size() == CallArgs.size() &&
2574         "Mismatch between function signature & arguments.");
2575  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2576  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2577       I != E; ++I, ++info_it) {
2578    const ABIArgInfo &ArgInfo = info_it->info;
2579    RValue RV = I->RV;
2580
2581    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
2582
2583    // Insert a padding argument to ensure proper alignment.
2584    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2585      Args.push_back(llvm::UndefValue::get(PaddingType));
2586      ++IRArgNo;
2587    }
2588
2589    switch (ArgInfo.getKind()) {
2590    case ABIArgInfo::InAlloca: {
2591      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2592      if (RV.isAggregate()) {
2593        // Replace the placeholder with the appropriate argument slot GEP.
2594        llvm::Instruction *Placeholder =
2595            cast<llvm::Instruction>(RV.getAggregateAddr());
2596        CGBuilderTy::InsertPoint IP = Builder.saveIP();
2597        Builder.SetInsertPoint(Placeholder);
2598        llvm::Value *Addr = Builder.CreateStructGEP(
2599            ArgMemory, ArgInfo.getInAllocaFieldIndex());
2600        Builder.restoreIP(IP);
2601        deferPlaceholderReplacement(Placeholder, Addr);
2602      } else {
2603        // Store the RValue into the argument struct.
2604        llvm::Value *Addr =
2605            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
2606        unsigned AS = Addr->getType()->getPointerAddressSpace();
2607        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
2608        // There are some cases where a trivial bitcast is not avoidable.  The
2609        // definition of a type later in a translation unit may change its type
2610        // from {}* to (%struct.foo*)*.
2611        if (Addr->getType() != MemType)
2612          Addr = Builder.CreateBitCast(Addr, MemType);
2613        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
2614        EmitInitStoreOfNonAggregate(*this, RV, argLV);
2615      }
2616      break; // Don't increment IRArgNo!
2617    }
2618
2619    case ABIArgInfo::Indirect: {
2620      if (RV.isScalar() || RV.isComplex()) {
2621        // Make a temporary alloca to pass the argument.
2622        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2623        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2624          AI->setAlignment(ArgInfo.getIndirectAlign());
2625        Args.push_back(AI);
2626
2627        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
2628        EmitInitStoreOfNonAggregate(*this, RV, argLV);
2629
2630        // Validate argument match.
2631        checkArgMatches(AI, IRArgNo, IRFuncTy);
2632      } else {
2633        // We want to avoid creating an unnecessary temporary+copy here;
2634        // however, we need one in three cases:
2635        // 1. If the argument is not byval, and we are required to copy the
2636        //    source.  (This case doesn't occur on any common architecture.)
2637        // 2. If the argument is byval, RV is not sufficiently aligned, and
2638        //    we cannot force it to be sufficiently aligned.
2639        // 3. If the argument is byval, but RV is located in an address space
2640        //    different than that of the argument (0).
2641        llvm::Value *Addr = RV.getAggregateAddr();
2642        unsigned Align = ArgInfo.getIndirectAlign();
2643        const llvm::DataLayout *TD = &CGM.getDataLayout();
2644        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
2645        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
2646          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
2647        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2648            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
2649             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
2650             (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
2651          // Create an aligned temporary, and copy to it.
2652          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2653          if (Align > AI->getAlignment())
2654            AI->setAlignment(Align);
2655          Args.push_back(AI);
2656          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2657
2658          // Validate argument match.
2659          checkArgMatches(AI, IRArgNo, IRFuncTy);
2660        } else {
2661          // Skip the extra memcpy call.
2662          Args.push_back(Addr);
2663
2664          // Validate argument match.
2665          checkArgMatches(Addr, IRArgNo, IRFuncTy);
2666        }
2667      }
2668      break;
2669    }
2670
2671    case ABIArgInfo::Ignore:
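          // Nothing is passed for this argument.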
2672      break;
2673
2674    case ABIArgInfo::Extend:
2675    case ABIArgInfo::Direct: {
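          // Fast path: the coerced type is not a struct, matches the converted
          // Clang type exactly, and has no offset, so pass the value directly
          // without round-tripping through memory.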
2676      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2677          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2678          ArgInfo.getDirectOffset() == 0) {
2679        llvm::Value *V;
2680        if (RV.isScalar())
2681          V = RV.getScalarVal();
2682        else
2683          V = Builder.CreateLoad(RV.getAggregateAddr());
2684
2685        // If the argument doesn't match, perform a bitcast to coerce it.  This
2686        // can happen due to trivial type mismatches.
2687        if (IRArgNo < IRFuncTy->getNumParams() &&
2688            V->getType() != IRFuncTy->getParamType(IRArgNo))
2689          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2690        Args.push_back(V);
2691
2692        checkArgMatches(V, IRArgNo, IRFuncTy);
2693        break;
2694      }
2695
2696      // FIXME: Avoid the conversion through memory if possible.
2697      llvm::Value *SrcPtr;
2698      if (RV.isScalar() || RV.isComplex()) {
2699        SrcPtr = CreateMemTemp(I->Ty, "coerce");
2700        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2701        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
2702      } else
2703        SrcPtr = RV.getAggregateAddr();
2704
2705      // If the value is offset in memory, apply the offset now.
2706      if (unsigned Offs = ArgInfo.getDirectOffset()) {
2707        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2708        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2709        SrcPtr = Builder.CreateBitCast(SrcPtr,
2710                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2711
2712      }
2713
2714      // If the coerce-to type is a first class aggregate, we flatten it and
2715      // pass the elements. Either way is semantically identical, but fast-isel
2716      // and the optimizer generally like scalar values better than FCAs.
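          // (For example, a coerce-to type of { i64, i64 } is passed as two
          // separate i64 values rather than as a single first-class aggregate.)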
2717      if (llvm::StructType *STy =
2718            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2719        llvm::Type *SrcTy =
2720          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2721        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2722        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2723
2724        // If the source type is smaller than the destination type of the
2725        // coerce-to logic, copy the source value into a temp alloca the size
2726        // of the destination type to allow loading all of it. The bits past
2727        // the source value are left undef.
2728        if (SrcSize < DstSize) {
2729          llvm::AllocaInst *TempAlloca
2730            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2731          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2732          SrcPtr = TempAlloca;
2733        } else {
2734          SrcPtr = Builder.CreateBitCast(SrcPtr,
2735                                         llvm::PointerType::getUnqual(STy));
2736        }
2737
2738        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2739          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2740          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2741          // We don't know what we're loading from.
2742          LI->setAlignment(1);
2743          Args.push_back(LI);
2744
2745          // Validate argument match.
2746          checkArgMatches(LI, IRArgNo, IRFuncTy);
2747        }
2748      } else {
2749        // In the simple case, just pass the coerced loaded value.
2750        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2751                                         *this));
2752
2753        // Validate argument match.
2754        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2755      }
2756
2757      break;
2758    }
2759
2760    case ABIArgInfo::Expand:
2761      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
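          // ExpandTypeToArgs may have pushed several IR arguments, so resync
          // IRArgNo with the number of IR arguments emitted so far.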
2762      IRArgNo = Args.size();
2763      break;
2764    }
2765  }
2766
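      // If we built an argument memory packet for inalloca arguments, pass it
      // as the final IR argument, bitcasting it to the callee's expected
      // parameter type if necessary.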
2767  if (ArgMemory) {
2768    llvm::Value *Arg = ArgMemory;
2769    llvm::Type *LastParamTy =
2770        IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
2771    if (Arg->getType() != LastParamTy) {
2772#ifndef NDEBUG
2773      // Assert that these structs have equivalent element types.
2774      llvm::StructType *FullTy = CallInfo.getArgStruct();
2775      llvm::StructType *Prefix = cast<llvm::StructType>(
2776          cast<llvm::PointerType>(LastParamTy)->getElementType());
2777
2778      // For variadic functions, the caller might supply a larger struct than
2779      // the callee expects, and that's OK.
2780      assert(Prefix->getNumElements() == FullTy->getNumElements() ||
2781             (CallInfo.isVariadic() &&
2782              Prefix->getNumElements() <= FullTy->getNumElements()));
2783
2784      for (llvm::StructType::element_iterator PI = Prefix->element_begin(),
2785                                              PE = Prefix->element_end(),
2786                                              FI = FullTy->element_begin();
2787           PI != PE; ++PI, ++FI)
2788        assert(*PI == *FI);
2789#endif
2790      Arg = Builder.CreateBitCast(Arg, LastParamTy);
2791    }
2792    Args.push_back(Arg);
2793  }
2794
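      // Deactivate any argument cleanups that were deferred to the point of
      // the call (for example, when the callee is responsible for destroying
      // the argument).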
2795  if (!CallArgs.getCleanupsToDeactivate().empty())
2796    deactivateArgCleanupsBeforeCall(*this, CallArgs);
2797
2798  // If the callee is a bitcast of a function to a varargs pointer to function
2799  // type, check to see if we can remove the bitcast.  This handles some cases
2800  // with unprototyped functions.
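      // (For example, in C a call through a declaration like "void f();" is
      // emitted through a bitcast of the actual definition's function type.)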
2801  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2802    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2803      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2804      llvm::FunctionType *CurFT =
2805        cast<llvm::FunctionType>(CurPT->getElementType());
2806      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2807
2808      if (CE->getOpcode() == llvm::Instruction::BitCast &&
2809          ActualFT->getReturnType() == CurFT->getReturnType() &&
2810          ActualFT->getNumParams() == CurFT->getNumParams() &&
2811          ActualFT->getNumParams() == Args.size() &&
2812          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2813        bool ArgsMatch = true;
2814        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2815          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2816            ArgsMatch = false;
2817            break;
2818          }
2819
2820        // Strip the cast if we can get away with it.  This is a nice cleanup,
2821        // but also allows us to inline the function at -O0 if it is marked
2822        // always_inline.
2823        if (ArgsMatch)
2824          Callee = CalleeF;
2825      }
2826    }
2827
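      // Compute the IR calling convention and attribute list for this call
      // from the CGFunctionInfo and the target declaration.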
2828  unsigned CallingConv;
2829  CodeGen::AttributeListType AttributeList;
2830  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2831                             CallingConv, true);
2832  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2833                                                     AttributeList);
2834
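      // Emit an invoke rather than a call if the call may unwind into an
      // enclosing EH scope; a nounwind callee never needs a landing pad.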
2835  llvm::BasicBlock *InvokeDest = 0;
2836  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2837                          llvm::Attribute::NoUnwind))
2838    InvokeDest = getInvokeDest();
2839
2840  llvm::CallSite CS;
2841  if (!InvokeDest) {
2842    CS = Builder.CreateCall(Callee, Args);
2843  } else {
2844    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2845    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2846    EmitBlock(Cont);
2847  }
2848  if (callOrInvoke)
2849    *callOrInvoke = CS.getInstruction();
2850
2851  CS.setAttributes(Attrs);
2852  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2853
2854  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2855  // optimizer it can aggressively ignore unwind edges.
2856  if (CGM.getLangOpts().ObjCAutoRefCount)
2857    AddObjCARCExceptionMetadata(CS.getInstruction());
2858
2859  // If the call doesn't return, finish the basic block and clear the
2860  // insertion point; this allows the rest of IRgen to discard
2861  // unreachable code.
2862  if (CS.doesNotReturn()) {
2863    Builder.CreateUnreachable();
2864    Builder.ClearInsertionPoint();
2865
2866    // FIXME: For now, emit a dummy basic block because expr emitters in
2867    // generally are not ready to handle emitting expressions at unreachable
2868    // points.
2869    EnsureInsertPoint();
2870
2871    // Return a reasonable RValue.
2872    return GetUndefRValue(RetTy);
2873  }
2874
2875  llvm::Instruction *CI = CS.getInstruction();
2876  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2877    CI->setName("call");
2878
2879  // Emit any writebacks immediately.  Arguably this should happen
2880  // after any return-value munging.
2881  if (CallArgs.hasWritebacks())
2882    emitWritebacks(*this, CallArgs);
2883
2884  // The stack cleanup for inalloca arguments has to run outside the normal
2885  // lexical order, so deactivate it and run it manually here.
2886  CallArgs.freeArgumentMemory(*this);
2887
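      // Translate the IR result back into a Clang RValue according to how the
      // return value was classified.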
2888  switch (RetAI.getKind()) {
2889  case ABIArgInfo::InAlloca:
2890  case ABIArgInfo::Indirect:
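        // The callee wrote the result through the hidden sret / inalloca
        // return slot, so load it back out of that temporary.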
2891    return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
2892
2893  case ABIArgInfo::Ignore:
2894    // The ABI ignores the return value, but the call expression still has a
2895    // result type; construct an appropriate (undef) return value for our caller.
2896    return GetUndefRValue(RetTy);
2897
2898  case ABIArgInfo::Extend:
2899  case ABIArgInfo::Direct: {
2900    llvm::Type *RetIRTy = ConvertType(RetTy);
2901    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2902      switch (getEvaluationKind(RetTy)) {
2903      case TEK_Complex: {
2904        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2905        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2906        return RValue::getComplex(std::make_pair(Real, Imag));
2907      }
2908      case TEK_Aggregate: {
2909        llvm::Value *DestPtr = ReturnValue.getValue();
2910        bool DestIsVolatile = ReturnValue.isVolatile();
2911
2912        if (!DestPtr) {
2913          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2914          DestIsVolatile = false;
2915        }
2916        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2917        return RValue::getAggregate(DestPtr);
2918      }
2919      case TEK_Scalar: {
2920        // If the return value doesn't match, perform a bitcast to coerce it.  This
2921        // can happen due to trivial type mismatches.
2922        llvm::Value *V = CI;
2923        if (V->getType() != RetIRTy)
2924          V = Builder.CreateBitCast(V, RetIRTy);
2925        return RValue::get(V);
2926      }
2927      }
2928      llvm_unreachable("bad evaluation kind");
2929    }
2930
2931    llvm::Value *DestPtr = ReturnValue.getValue();
2932    bool DestIsVolatile = ReturnValue.isVolatile();
2933
2934    if (!DestPtr) {
2935      DestPtr = CreateMemTemp(RetTy, "coerce");
2936      DestIsVolatile = false;
2937    }
2938
2939    // If the value is offset in memory, apply the offset now.
2940    llvm::Value *StorePtr = DestPtr;
2941    if (unsigned Offs = RetAI.getDirectOffset()) {
2942      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2943      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2944      StorePtr = Builder.CreateBitCast(StorePtr,
2945                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2946    }
2947    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2948
2949    return convertTempToRValue(DestPtr, RetTy, SourceLocation());
2950  }
2951
2952  case ABIArgInfo::Expand:
2953    llvm_unreachable("Invalid ABI kind for return argument");
2954  }
2955
2956  llvm_unreachable("Unhandled ABIArgInfo::Kind");
2957}
2958
2959/* VarArg handling */
2960
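    /// Lower a va_arg expression by delegating to the target-specific ABIInfo,
    /// which knows how this target's va_list is laid out and consumed.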
2961llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2962  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2963}
2964