//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *ImplicitParam,
                                          QualType ImplicitParamTy,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                            : TCK_MemberCall,
                CallLoc, This, getContext().getRecordType(MD->getParent()));
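  // For example (illustrative), the check catches undefined calls such as:
  //   struct X { void f(); };
  //   struct Y { int i; };
  //   reinterpret_cast<X *>(new Y)->f();  // object is not an X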

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    CE->getLocStart(), ReturnValue, CE->arg_begin(),
                    CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is neither the best dynamic
      // class nor the class of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
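    // For example (illustrative):
    //   struct Base    { virtual Base *clone(); };
    //   struct Derived : Base { Derived *clone() override; };
    // Devirtualizing b->clone() to Derived::clone would require converting
    // the returned Derived* back to Base*, which can need an adjustment.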
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getReturnType().getCanonicalType() !=
            MD->getReturnType().getCanonicalType())
      DevirtualizedMethod = nullptr;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete);
  else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
                                                             Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
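  // For example (illustrative), 'd->Base::f()' below calls Base::f
  // directly even though f is virtual:
  //   struct Base { virtual void f(); };
  //   struct Derived : Base { void f() override; };
  //   void g(Derived *d) { d->Base::f(); }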
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                CE->getExprLoc(), This);
    } else {
      if (getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                        /*ImplicitParam=*/nullptr, QualType(), nullptr,
                        nullptr);
    }
    return RValue::get(nullptr);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  if (MD->isVirtual()) {
    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
        *this, MD, This, UseVirtualCall);
  }

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/nullptr, QualType(),
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee = CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(
      *this, BO, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/nullptr, QualType(),
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlignment();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member, we can't memset it to
  // zero.  Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
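  // For example (illustrative), under the Itanium ABI the null value of a
  // pointer to data member is all-ones rather than all-zeros, so a base
  // class such as
  //   struct B { int B::*mp; };
  // cannot be null-initialized with a plain zero memset.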
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination
  // is already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
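  // For example (illustrative), in 'X x = X(1);' the copy from the
  // temporary X(1) is elidable, so we emit the temporary directly into x.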
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

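// Computes the allocation size for a new-expression. For example
// (illustrative), 'new int[n]' needs n * sizeof(int) bytes plus any array
// cookie the ABI uses to record the element count for delete[]; if the
// computation overflows size_t, the size becomes -1 so operator new fails.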
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
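    // For example (illustrative), a new-expression like 'new int[n]{1, 2, 3}'
    // must allocate at least three elements; a smaller count is an overflow.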
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {
  // FIXME: Refactor with EmitExprAsInit.
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                         Alignment),
                       false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType ElementType,
                                         llvm::Value *BeginPtr,
                                         llvm::Value *NumElements,
                                         llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  llvm::Value *CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *EndOfInit = nullptr;
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
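    // For example (illustrative), in 'new int[2][3]{{1, 2, 3}, {4, 5, 6}}'
    // each of the two init list elements initializes three ints.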
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      unsigned AS = CurPtr->getType()->getPointerAddressSpace();
      llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(AS);
      CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit)
        Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
                            EndOfInit);
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr);
      CurPtr = Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
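    // For example (illustrative), for 'new int[n][3]{}' the outer filler is
    // itself an empty initializer list; we drill down to the filler that
    // initializes a single int.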
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
  }

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
                         Alignment.getQuantity(), false);
    return true;
  };

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If the new expression did not specify value-initialization, then
      // there is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr,
                               CCE->arg_begin(), CCE->arg_end(),
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
                                                "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr, EntryBB);
  CurPtr = CurPtrPhi;

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *Callee,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
                   CalleeAddr, ReturnValueSlot(), Args,
                   Callee, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
  if (Callee->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const Expr *Arg,
                                                 bool IsDelete) {
  CallArgList Args;
  const Stmt *ArgS = Arg;
  EmitCallArgs(Args, *Type->param_type_begin(),
               ConstExprIterator(&ArgS), ConstExprIterator(&ArgS + 1));
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
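      // For example (illustrative), such a member operator has the form:
      //   void T::operator delete(void *p, std::size_t size);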
      if (FPT->getNumParams() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumParams() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements
  // than there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  EmitCallArgs(allocatorArgs, allocatorType->isVariadic(),
               allocatorType->param_type_begin() + 1,
               allocatorType->param_type_end(), E->placement_arg_begin(),
               E->placement_arg_end());

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
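  // For example (illustrative), 'new (std::nothrow) T(x)' may return null,
  // so the initializer for T must only run on the non-null path.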
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS = allocation->getType()->getPointerAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
1311    llvm::Type *resultType = ConvertTypeForMem(E->getType());
1312    if (result->getType() != resultType)
1313      result = Builder.CreateBitCast(result, resultType);
1314  }
1315
1316  // Deactivate the 'operator delete' cleanup if we finished
1317  // initialization.
1318  if (operatorDeleteCleanup.isValid()) {
1319    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1320    cleanupDominator->eraseFromParent();
1321  }
1322
1323  if (nullCheck) {
1324    conditional.end(*this);
1325
1326    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1327    EmitBlock(contBB);
1328
1329    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1330    PHI->addIncoming(result, notNullBB);
1331    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1332                     nullCheckBB);
1333
1334    result = PHI;
1335  }
1336
1337  return result;
1338}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = nullptr;
  QualType SizeTy;
  if (DeleteFTy->getNumParams() == 2) {
    SizeTy = DeleteFTy->getParamType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }
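  // E.g., for "delete p" with "T *p" and sizeof(T) == 8, a two-parameter
  // "operator delete(void*, size_t)" receives the constant 8 here.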

  QualType ArgTy = DeleteFTy->getParamType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.

          // Derive the complete-object pointer, which is what we need
          // to pass to the deallocation function.
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        // FIXME: Provide a source location here.
        CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
        CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
                                                      SourceLocation(), Ptr);
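        // In the Dtor_Deleting case the deleting destructor itself calls
        // the class's operator delete (as in, e.g., the Itanium ABI), so
        // no separate deallocation call is emitted here.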

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
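    // Under ARC, release or destroy the stored object pointer according
    // to its lifetime qualifier before the storage is deallocated.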
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getParamType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumParams() == 2) {
        QualType SizeType = DeleteFTy->getParamType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(SizeType));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), SizeType);
      }
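      // E.g., deleting a "new T[n]" allocation with sizeof(T) == 4 and an
      // 8-byte cookie passes 4*n + 8 as the size argument.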

      // Emit the call to delete.
      EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);
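  // allocatedPtr is the start of the original allocation (i.e. before any
  // cookie), which is what must be passed to operator delete; deletedPtr
  // still points at the first array element.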

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }
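  // E.g., given "A (*p)[3][7]", Ptr has type [3 x [7 x %A]]* and the GEP
  // with indices 0,0,0 yields an %A* pointing at the first element.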

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  bool IsDeref = false;
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (UO->getOpcode() == UO_Deref)
      IsDeref = true;
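  // E.g., "typeid(*p)" with a possibly-null "p" must throw std::bad_typeid
  // rather than load a vtable pointer through null.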

  QualType SrcRecordTy = E->getType();
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(IsDeref, SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

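    // The bad_typeid handler throws (e.g. via __cxa_bad_typeid in the
    // Itanium ABI), so this block does not fall through to EndBlock.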
    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

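  // Merge the successful cast result with the null fast-path result
  // (C++ [expr.dynamic.cast]p4: a null v yields a null result).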
  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture field.
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
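    // For an array capture, the index variables drive the per-element
    // initialization loop inside EmitInitializerForField.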
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
