//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}
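
// For illustration (not exercised by this code): a C declaration such as
//   __attribute__((stdcall)) void f(void);
// carries CC_X86StdCall in the AST and is lowered here to
// llvm::CallingConv::X86_StdCall, while conventions that LLVM cannot yet
// encode (e.g. CC_X86Pascal above) deliberately fall back to
// llvm::CallingConv::C.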

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
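
// Illustrative example (assumed behavior, not exercised here): for a
// function declared as 'const int f();', GetReturnType strips the
// top-level 'const', so ABI lowering sees a plain canonical 'int'.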

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 false, None, FTNP->getExtInfo(),
                                 RequiredArgs(0));
}
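
// Sketch of the intent: in C, 'int f();' declares a function without a
// prototype.  Arranging it as variadic with zero required arguments means
// a later call like 'f(1, 2)' can still be lowered, since every argument
// is treated as if passed through the ellipsis.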

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                                     bool IsInstanceMethod,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    prefix.push_back(FTP->getParamType(i));
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
                                     extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
}
/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of the given prefix of implicit arguments
/// (notably the 'this' pointer).
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}
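
// Worked example (assumed, for exposition): on an x86-64 Windows target,
// '__attribute__((sysv_abi)) void g();' returns CC_X86_64SysV here, while
// '__attribute__((ms_abi))' is already the platform default and therefore
// maps back to plain CC_C.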

static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
  switch (FI.getEffectiveCallingConvention()) {
  case llvm::CallingConv::C:
    switch (Target.getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  case llvm::CallingConv::ARM_AAPCS_VFP:
    return true;
  default:
    return false;
  }
}
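
// For example (illustrative): on an 'armv7-unknown-linux-gnueabihf' triple
// the default C convention already uses the VFP variant of AAPCS, so this
// returns true even when no explicit pcs("aapcs-vfp") attribute is present.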

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, ctorKind);
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    argTypes.push_back(FTP->getParamType(i));

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  RequiredArgs required =
      (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
}
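
// Resulting layout, as a sketch: for 'struct S { S(int); };' the complete
// constructor is arranged roughly as 'void (S*, int)', plus any implicit
// ABI parameters (such as a VTT pointer) appended by
// BuildConstructorSignature, with 'this' returned instead of void on ABIs
// where HasThisReturn() is true.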

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
       ++i)
    ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType =
      TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, dtorKind);
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
                                 argTys, einfo, required);
}
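
// Shape of the result, informally: a method '- (int)addX:(int)x' is
// arranged like the C function 'int f(id self, SEL _cmd, int x)', i.e.
// the receiver and selector always come first.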

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                .isNoProtoCallVariadic(args,
                                       cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
                                     fnType->getExtInfo(), required);
}
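
// Concrete instance (for exposition only): lowering the call
// 'printf("%d %d", 1, 2)' against 'int printf(const char *, ...)' arrives
// here with three CallArgs and yields RequiredArgs(1), marking everything
// after the format string as variadic.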

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
                                 argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool IsInstanceMethod,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
                          argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
                              required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool IsInstanceMethod,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = IsInstanceMethod;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
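
// Memory layout note (a sketch of the scheme used above): the single
// allocation holds the CGFunctionInfo followed by argTypes.size() + 1
// trailing ArgInfo slots, where slot 0 describes the return value and
// slots 1..N describe the parameters:
//
//   [ CGFunctionInfo | ArgInfo(ret) | ArgInfo(arg0) | ... | ArgInfo(argN-1) ]
//
// getArgsBuffer() simply points just past the CGFunctionInfo object.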

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (const auto *I : RD->fields()) {
        assert(!I->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(I->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
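
// Illustrative expansions (assumed target type mapping, for exposition):
//   struct P { int x; float y; }  expands to  i32, float
//   int a[3]                      expands to  i32, i32, i32
//   _Complex double               expands to  double, double
// i.e. one flattened entry per scalar leaf, recursing through arrays and
// record fields.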

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (const auto *FD : RD->fields()) {
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
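
// Example dive (illustrative IR types): with SrcSTy = { { i32, i8 }, float }
// and DstSize = 4, the first call GEPs to the inner { i32, i8 } and the
// recursive call GEPs again to the i32, since each first element is still
// at least DstSize bytes; the returned pointer is an i32*.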

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
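
// Worked example (illustrative): coercing the i64 0xAABBCCDD11223344 to i32
// yields 0x11223344 on a little-endian target (low bits, plain trunc) but
// 0xAABBCCDD on a big-endian one (lshr by 32 first), matching what a store
// of the i64 followed by an i32 load of the same address would produce.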

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
      1, false);
  return CGF.Builder.CreateLoad(Tmp);
}
804
805// Function to store a first-class aggregate into memory.  We prefer to
806// store the elements rather than the aggregate to be more friendly to
807// fast-isel.
808// FIXME: Do we need to recurse here?
809static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
810                          llvm::Value *DestPtr, bool DestIsVolatile,
811                          bool LowAlignment) {
812  // Prefer scalar stores to first-class aggregate stores.
813  if (llvm::StructType *STy =
814        dyn_cast<llvm::StructType>(Val->getType())) {
815    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
816      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
817      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
818      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
819                                                    DestIsVolatile);
820      if (LowAlignment)
821        SI->setAlignment(1);
822    }
823  } else {
824    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
825    if (LowAlignment)
826      SI->setAlignment(1);
827  }
828}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
        1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  bool SwapThisWithSRet = false;
  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = nullptr;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));

    SwapThisWithSRet = retAI.isSRetAfterThis();
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      // We cannot do this for functions using the AAPCS calling convention,
      // as structures are treated differently by that calling convention.
      llvm::Type *argType = argAI.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && !isAAPCSVFP(FI, getTarget())) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  // Add the inalloca struct as the last parameter type.
  if (llvm::StructType *ArgStruct = FI.getArgStruct())
    argTypes.push_back(ArgStruct->getPointerTo());

  if (SwapThisWithSRet)
    std::swap(argTypes[0], argTypes[1]);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
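
// End-to-end sketch (assumed x86-64 SysV lowering, for illustration): a C
// function 'struct Big f(struct Big b)' whose struct is returned and passed
// in memory would come out of here roughly as
//   void (%struct.Big* /*sret*/, %struct.Big* /*indirect arg*/)
// while a small two-eightbyte struct passed in registers would instead be
// flattened into its coerced scalar pieces.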

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overriders.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca: {
    // inalloca disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));

    if (!SwapThisWithSRet)
      ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetTy->isReferenceType())
    RetAttrs.addAttribute(llvm::Attribute::NonNull);

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (const auto &I : FI.arguments()) {
    QualType ParamType = I.type;
    const ABIArgInfo &AI = I.info;
    llvm::AttrBuilder Attrs;

    // Skip over the sret parameter when it comes second.  We already handled
    // it above.
    if (Index == 2 && SwapThisWithSRet)
      ++Index;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (!isAAPCSVFP(FI, getTarget()) && STy) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (ParamType->isReferenceType())
      Attrs.addAttribute(llvm::Attribute::NonNull);

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }

  // Add the inalloca attribute to the trailing inalloca parameter if present.
  if (FI.usesInAlloca()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
  }

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}
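
// Putting it together (an illustrative outcome, not checked-in behavior):
// for 'short add(short a, short b) __attribute__((pure));' on a target that
// extends small integers, this produces roughly
//   declare signext i16 @add(i16 signext, i16 signext) readonly nounwind
// with the sign extensions coming from the Extend cases above.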

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
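
// Context, with an example (illustrative): in a K&R-style definition such as
//   void f(c) char c; { ... }
// the parameter undergoes the default argument promotions at the ABI
// boundary, so the incoming i32 is truncated back to i8 here; 'float'
// parameters similarly arrive as 'double' and are demoted with an fptrunc.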

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (FI.usesInAlloca()) {
    llvm::Function::arg_iterator EI = Fn->arg_end();
    --EI;
    ArgStruct = EI;
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }
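  // Illustrative sketch: with inalloca (x86 Windows only), the function
  // roughly looks like
  //   define void @f(<{ %struct.A, %struct.B }>* inalloca %0)
  // and each memory argument below is addressed as a GEP off that pointer.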

  // Name the struct return parameter, which can come first or second.
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  bool SwapThisWithSRet = false;
  if (RetAI.isIndirect()) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    if (SwapThisWithSRet)
      ++AI;
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    if (SwapThisWithSRet)
      --AI;  // Go back to the beginning for 'this'.
    else
      ++AI;  // Skip the sret parameter.
  }
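
  // Illustrative sketch of the sret-after-this case: for an instance method
  //   S C::m();
  // the Microsoft C++ ABI passes 'this' first and the sret pointer second,
  //   define void @m(%class.C* %this, %struct.S* noalias %agg.result)
  // while the Itanium convention puts the sret pointer first.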

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      llvm::Value *V = Builder.CreateStructGEP(
          ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      continue;  // Don't increment AI!
    }

    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
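        // Illustrative IR for the realignment path above (a sketch; the
        // types, sizes, and alignments vary by target):
        //   %coerce = alloca %struct.S, align 16
        //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src,
        //                                        i64 <size>, i32 <align>,
        //                                        i1 false)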
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      // We cannot do this for functions using the AAPCS calling convention,
      // as structures are treated differently by that calling convention.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the
        // alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }
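
      // Illustrative sketch of the flattened case: a { i32, i32 } coercion
      // arrives as two scalar arguments,
      //   define void @f(i32 %x.coerce0, i32 %x.coerce1)
      // and the stores above reassemble them field-by-field in the alloca.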

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;

    if (ArgNo == 1 && SwapThisWithSRet)
      ++AI;  // Skip the sret parameter.
  }

  if (FI.usesInAlloca())
    ++AI;
  assert(AI == Fn->arg_end() && "Argument mismatch!");

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in the current block; we are
  // pattern-matching the instructions that immediately precede the return.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*, 4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
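    // Illustrative shape of the sequence we expect (a sketch; the marker
    // string is target-specific):
    //   call void asm sideeffect "<marker>", ""()
    //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %ret)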
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return nullptr;
    if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
  if (!store) return nullptr;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
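
// Illustrative pattern this recognizes (sketch):
//   store i32 %result, i32* %retval
//   br label %return            ; a chain of single-predecessor blocks
// return:
//   <insertion point>           ; the load of %retval can be elided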

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  // Functions with no result always return void.
  if (!ReturnValue) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet =
          Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                          EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = nullptr;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}
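
// A placeholder produced above is just a load of undef (sketch):
//   %placeholder = load %struct.S** undef
// EmitCall later replaces it with the real inalloca slot address; see
// deferPlaceholderReplacement.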

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
    AggValueSlot Slot = createPlaceholderSlot(*this, type);
    Slot.setExternallyDestructed();

    // FIXME: Either emit a copy constructor call, or figure out how to do
    // guaranteed tail calls with perfect forwarding in LLVM.
    CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
    EmitNullInitialization(Slot.getAddr(), type);

    RValue RV = Slot.asRValue();
    args.add(RV, type);
    return;
  }

  args.add(convertTempToRValue(local, type, loc), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
      cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
      "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here:  the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}
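
// Control-flow sketch for a writeback whose source may be null:
//   %icr.isnull = icmp eq i8** %src, null
//   br i1 %icr.isnull, label %icr.done, label %icr.writeback
// icr.writeback:
//   ...store the temporary back through %src...
// icr.done: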

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
    CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
         I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary; the temporary may be
/// copy-initialized from the original l-value, and its final value is
/// written back into that l-value after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to null-check it
  // and select between the temporary and a null argument.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}
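
// Source-level sketch of what the above lowers (assumed ARC example):
//   void getError(NSError **err);
//   __strong NSError *e; getError(&e);
// passes the address of 'icr.temp' as the argument and, after the call,
// writes the temporary's value back into 'e'.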

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup.  We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}
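
// Illustrative IR for the stack save and spill above (sketch):
//   %inalloca.save = call i8* @llvm.stacksave()
//   store i8* %inalloca.save, i8** %inalloca.spmem
// The cleanup restores the stack from %inalloca.spmem on the unwind path.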

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   bool ForceColumnInfo) {
  CGDebugInfo *DI = getDebugInfo();
  SourceLocation CallLoc;
  if (DI) CallLoc = DI->getLocation();

  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      // Restore the debug location.
      if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    // Restore the debug location.
    if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
  }
}

namespace {

struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Addr);
  }
};

}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee.  However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory.  Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully.  It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      llvm::Value *tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
                        L.getAlignment());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
  PGO.setCurrentRegionUnreachable();
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVectorImpl<llvm::Value *> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (const auto *FD : RD->fields()) {
        RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
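
// Illustrative sketch: expanding
//   struct S { int a; float b; };
// passes its fields as separate scalar arguments, roughly
//   call void @f(i32 <a>, float <b>)
// with unions contributing only their largest member.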

/// \brief Store a non-aggregate value to an address to initialize it.  For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking
  // at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::Value *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }
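  // Illustrative sketch: at the call site the argument block roughly looks
  // like
  //   %argmem = alloca inalloca <{ %struct.A, %struct.B }>
  //   ...initialize the fields via GEPs...
  //   call void @f(<{ %struct.A, %struct.B }>* inalloca %argmem)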
2632
2633  // If the call returns a temporary with struct return, create a temporary
2634  // alloca to hold the result, unless one is given to us.
2635  llvm::Value *SRetPtr = nullptr;
2636  bool SwapThisWithSRet = false;
2637  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
2638    SRetPtr = ReturnValue.getValue();
2639    if (!SRetPtr)
2640      SRetPtr = CreateMemTemp(RetTy);
2641    if (RetAI.isIndirect()) {
2642      Args.push_back(SRetPtr);
2643      SwapThisWithSRet = RetAI.isSRetAfterThis();
2644      if (SwapThisWithSRet)
2645        IRArgNo = 1;
2646      checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
2647      if (SwapThisWithSRet)
2648        IRArgNo = 0;
2649    } else {
2650      llvm::Value *Addr =
2651          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
2652      Builder.CreateStore(SRetPtr, Addr);
2653    }
2654  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    // Skip 'sret' if it came second.
    if (IRArgNo == 1 && SwapThisWithSRet)
      ++IRArgNo;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
      Args.push_back(llvm::UndefValue::get(PaddingType));
      ++IRArgNo;
    }

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr = Builder.CreateStructGEP(
            ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable.  The
        // definition of a type later in the translation unit may change the
        // slot's type from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break; // Don't increment IRArgNo!
    }

    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different from that of the argument (0).
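        // A sketch of the copy emitted when a temporary is needed
        // (illustrative names):
        //   %byval.tmp = alloca %struct.S, align N  ; N = getIndirectAlign()
        //   ...aggregate copy from the source into %byval.tmp...
        //   call void @f(%struct.S* byval %byval.tmp)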
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
             (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first-class aggregate, we flatten it and
      // pass the elements. Either form is semantically identical, but
      // fast-isel and the optimizer generally like scalar values better than
      // FCAs. We cannot do this for functions using the AAPCS calling
      // convention, as structures are treated differently by that calling
      // convention.
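      // For example (a sketch; types illustrative): coercing to { i32, i32 }
      // emits one load per element and passes two scalars,
      //   %0 = load i32* %coerce.gep0
      //   %1 = load i32* %coerce.gep1
      //   call void @f(i32 %0, i32 %1)
      // rather than passing a single { i32, i32 } value.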
      llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && !isAAPCSVFP(CallInfo, getTarget())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  if (SwapThisWithSRet)
    std::swap(Args[0], Args[1]);

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site.  In such
      // cases, we can't do any parameter mismatch checks.  Give up and bitcast
      // the callee.
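      // Sketch of the resulting call site (illustrative):
      //   %fn = bitcast void (i32, ...)* @f to void (<{ ... }>*)*
      //   call void %fn(<{ ... }>* inalloca %argmem)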
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    Args.push_back(Arg);
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
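  // e.g. (sketch): a call through an unprototyped 'void f()' may look like
  //   call void bitcast (void (i32)* @f to void (...)*)(i32 42)
  // and can be rewritten as the direct call
  //   call void @f(i32 42)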
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
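  // Emit a plain call when there is nothing to unwind to; otherwise emit an
  // invoke whose unwind edge branches to the current EH dispatch block.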
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
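  // e.g. (sketch): after 'call void @exit(i32 0) noreturn' we emit
  // 'unreachable', and anything emitted past this point lands in a
  // discardable dummy block.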
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect:
    return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

  case ABIArgInfo::Ignore:
    // Even though we are ignoring the call's result, make sure to
    // construct an appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      switch (getEvaluationKind(RetTy)) {
      case TEK_Complex: {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      case TEK_Aggregate: {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }
      case TEK_Scalar: {
        // If the return value doesn't match, perform a bitcast to coerce it.
        // This can happen due to trivial type mismatches.
        llvm::Value *V = CI;
        if (V->getType() != RetIRTy)
          V = Builder.CreateBitCast(V, RetIRTy);
        return RValue::get(V);
      }
      }
      llvm_unreachable("bad evaluation kind");
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    return convertTempToRValue(DestPtr, RetTy, SourceLocation());
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

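/// Lower a va_arg expression by delegating to the target's ABIInfo, which
/// understands the target-specific va_list representation.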
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
