CGExprCXX.cpp revision 176edba5311f6eff0cad2631449885ddf4fbc9ea
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenFunction.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "clang/CodeGen/CGFunctionInfo.h"
20#include "clang/Frontend/CodeGenOptions.h"
21#include "llvm/IR/CallSite.h"
22#include "llvm/IR/Intrinsics.h"
23
24using namespace clang;
25using namespace CodeGen;
26
27static RequiredArgs commonEmitCXXMemberOrOperatorCall(
28    CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
29    ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
30    QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args) {
31  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
32         isa<CXXOperatorCallExpr>(CE));
33  assert(MD->isInstance() &&
34         "Trying to emit a member or operator call expr on a static method!");
35
36  // C++11 [class.mfct.non-static]p2:
37  //   If a non-static member function of a class X is called for an object that
38  //   is not of type X, or of a type derived from X, the behavior is undefined.
39  SourceLocation CallLoc;
40  if (CE)
41    CallLoc = CE->getExprLoc();
42  CGF.EmitTypeCheck(
43      isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
44                                  : CodeGenFunction::TCK_MemberCall,
45      CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
46
47  // Push the this ptr.
48  Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
49
50  // If there is an implicit parameter (e.g. VTT), emit it.
51  if (ImplicitParam) {
52    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53  }
54
55  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
57
58  // And the rest of the call args.
59  if (CE) {
60    // Special case: skip first argument of CXXOperatorCall (it is "this").
61    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
62    CGF.EmitCallArgs(Args, FPT, CE->arg_begin() + ArgsToSkip, CE->arg_end(),
63                     CE->getDirectCallee());
64  } else {
65    assert(
66        FPT->getNumParams() == 0 &&
67        "No CallExpr specified for function with non-zero number of arguments");
68  }
69  return required;
70}
71
72RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
73    const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
74    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
75    const CallExpr *CE) {
76  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
77  CallArgList Args;
78  RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
79      *this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
80      Args);
81  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
82                  Callee, ReturnValue, Args, MD);
83}
84
85RValue CodeGenFunction::EmitCXXStructorCall(
86    const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
87    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
88    const CallExpr *CE, StructorType Type) {
89  CallArgList Args;
90  commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
91                                    ImplicitParam, ImplicitParamTy, CE, Args);
92  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
93                  Callee, ReturnValue, Args, MD);
94}
95
96static CXXRecordDecl *getCXXRecord(const Expr *E) {
97  QualType T = E->getType();
98  if (const PointerType *PTy = T->getAs<PointerType>())
99    T = PTy->getPointeeType();
100  const RecordType *Ty = T->castAs<RecordType>();
101  return cast<CXXRecordDecl>(Ty->getDecl());
102}
103
104// Note: This function also emit constructor calls to support a MSVC
105// extensions allowing explicit constructor function call.
106RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
107                                              ReturnValueSlot ReturnValue) {
108  const Expr *callee = CE->getCallee()->IgnoreParens();
109
110  if (isa<BinaryOperator>(callee))
111    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
112
113  const MemberExpr *ME = cast<MemberExpr>(callee);
114  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
115
116  if (MD->isStatic()) {
117    // The method is static, emit it as we would a regular call.
118    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
119    return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
120                    ReturnValue);
121  }
122
123  // Compute the object pointer.
124  const Expr *Base = ME->getBase();
125  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
126
127  const CXXMethodDecl *DevirtualizedMethod = nullptr;
128  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
129    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
130    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
131    assert(DevirtualizedMethod);
132    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
133    const Expr *Inner = Base->ignoreParenBaseCasts();
134    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
135        MD->getReturnType().getCanonicalType())
136      // If the return types are not the same, this might be a case where more
137      // code needs to run to compensate for it. For example, the derived
138      // method might return a type that inherits form from the return
139      // type of MD and has a prefix.
140      // For now we just avoid devirtualizing these covariant cases.
141      DevirtualizedMethod = nullptr;
142    else if (getCXXRecord(Inner) == DevirtualizedClass)
143      // If the class of the Inner expression is where the dynamic method
144      // is defined, build the this pointer from it.
145      Base = Inner;
146    else if (getCXXRecord(Base) != DevirtualizedClass) {
147      // If the method is defined in a class that is not the best dynamic
148      // one or the one of the full expression, we would have to build
149      // a derived-to-base cast to compute the correct this pointer, but
150      // we don't have support for that yet, so do a virtual call.
151      DevirtualizedMethod = nullptr;
152    }
153  }
154
155  llvm::Value *This;
156  if (ME->isArrow())
157    This = EmitScalarExpr(Base);
158  else
159    This = EmitLValue(Base).getAddress();
160
161
162  if (MD->isTrivial()) {
163    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
164    if (isa<CXXConstructorDecl>(MD) &&
165        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
166      return RValue::get(nullptr);
167
168    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
169      // We don't like to generate the trivial copy/move assignment operator
170      // when it isn't necessary; just produce the proper effect here.
171      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
172      EmitAggregateAssign(This, RHS, CE->getType());
173      return RValue::get(This);
174    }
175
176    if (isa<CXXConstructorDecl>(MD) &&
177        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
178      // Trivial move and copy ctor are the same.
179      assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
180      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
181      EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
182      return RValue::get(This);
183    }
184    llvm_unreachable("unknown trivial member function");
185  }
186
187  // Compute the function type we're calling.
188  const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
189  const CGFunctionInfo *FInfo = nullptr;
190  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
191    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
192        Dtor, StructorType::Complete);
193  else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
194    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
195        Ctor, StructorType::Complete);
196  else
197    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
198
199  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
200
201  // C++ [class.virtual]p12:
202  //   Explicit qualification with the scope operator (5.1) suppresses the
203  //   virtual call mechanism.
204  //
205  // We also don't emit a virtual call if the base expression has a record type
206  // because then we know what the type is.
207  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
208  llvm::Value *Callee;
209
210  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
211    assert(CE->arg_begin() == CE->arg_end() &&
212           "Destructor shouldn't have explicit parameters");
213    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
214    if (UseVirtualCall) {
215      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
216                                                This, CE);
217    } else {
218      if (getLangOpts().AppleKext &&
219          MD->isVirtual() &&
220          ME->hasQualifier())
221        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
222      else if (!DevirtualizedMethod)
223        Callee =
224            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
225      else {
226        const CXXDestructorDecl *DDtor =
227          cast<CXXDestructorDecl>(DevirtualizedMethod);
228        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
229      }
230      EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
231                                  /*ImplicitParam=*/nullptr, QualType(), CE);
232    }
233    return RValue::get(nullptr);
234  }
235
236  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
237    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
238  } else if (UseVirtualCall) {
239    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
240  } else {
241    if (getLangOpts().AppleKext &&
242        MD->isVirtual() &&
243        ME->hasQualifier())
244      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
245    else if (!DevirtualizedMethod)
246      Callee = CGM.GetAddrOfFunction(MD, Ty);
247    else {
248      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
249    }
250  }
251
252  if (MD->isVirtual()) {
253    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
254        *this, MD, This, UseVirtualCall);
255  }
256
257  return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
258                                     /*ImplicitParam=*/nullptr, QualType(), CE);
259}
260
261RValue
262CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
263                                              ReturnValueSlot ReturnValue) {
264  const BinaryOperator *BO =
265      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
266  const Expr *BaseExpr = BO->getLHS();
267  const Expr *MemFnExpr = BO->getRHS();
268
269  const MemberPointerType *MPT =
270    MemFnExpr->getType()->castAs<MemberPointerType>();
271
272  const FunctionProtoType *FPT =
273    MPT->getPointeeType()->castAs<FunctionProtoType>();
274  const CXXRecordDecl *RD =
275    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
276
277  // Get the member function pointer.
278  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
279
280  // Emit the 'this' pointer.
281  llvm::Value *This;
282
283  if (BO->getOpcode() == BO_PtrMemI)
284    This = EmitScalarExpr(BaseExpr);
285  else
286    This = EmitLValue(BaseExpr).getAddress();
287
288  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
289                QualType(MPT->getClass(), 0));
290
291  // Ask the ABI to load the callee.  Note that This is modified.
292  llvm::Value *Callee =
293    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, MemFnPtr, MPT);
294
295  CallArgList Args;
296
297  QualType ThisType =
298    getContext().getPointerType(getContext().getTagDeclType(RD));
299
300  // Push the this ptr.
301  Args.add(RValue::get(This), ThisType);
302
303  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
304
305  // And the rest of the call args
306  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
307  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
308                  Callee, ReturnValue, Args);
309}
310
311RValue
312CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
313                                               const CXXMethodDecl *MD,
314                                               ReturnValueSlot ReturnValue) {
315  assert(MD->isInstance() &&
316         "Trying to emit a member call expr on a static method!");
317  LValue LV = EmitLValue(E->getArg(0));
318  llvm::Value *This = LV.getAddress();
319
320  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
321      MD->isTrivial() && !MD->getParent()->mayInsertExtraPadding()) {
322    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
323    QualType Ty = E->getType();
324    EmitAggregateAssign(This, Src, Ty);
325    return RValue::get(This);
326  }
327
328  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
329  return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
330                                     /*ImplicitParam=*/nullptr, QualType(), E);
331}
332
333RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
334                                               ReturnValueSlot ReturnValue) {
335  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
336}
337
338static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
339                                            llvm::Value *DestPtr,
340                                            const CXXRecordDecl *Base) {
341  if (Base->isEmpty())
342    return;
343
344  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
345
346  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
347  CharUnits Size = Layout.getNonVirtualSize();
348  CharUnits Align = Layout.getNonVirtualAlignment();
349
350  llvm::Value *SizeVal = CGF.CGM.getSize(Size);
351
352  // If the type contains a pointer to data member we can't memset it to zero.
353  // Instead, create a null constant and copy it to the destination.
354  // TODO: there are other patterns besides zero that we can usefully memset,
355  // like -1, which happens to be the pattern used by member-pointers.
356  // TODO: isZeroInitializable can be over-conservative in the case where a
357  // virtual base contains a member pointer.
358  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
359    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
360
361    llvm::GlobalVariable *NullVariable =
362      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
363                               /*isConstant=*/true,
364                               llvm::GlobalVariable::PrivateLinkage,
365                               NullConstant, Twine());
366    NullVariable->setAlignment(Align.getQuantity());
367    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
368
369    // Get and call the appropriate llvm.memcpy overload.
370    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
371    return;
372  }
373
374  // Otherwise, just memset the whole thing to zero.  This is legal
375  // because in LLVM, all default initializers (other than the ones we just
376  // handled above) are guaranteed to have a bit pattern of all zeros.
377  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
378                           Align.getQuantity());
379}
380
381void
382CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
383                                      AggValueSlot Dest) {
384  assert(!Dest.isIgnored() && "Must have a destination!");
385  const CXXConstructorDecl *CD = E->getConstructor();
386
387  // If we require zero initialization before (or instead of) calling the
388  // constructor, as can be the case with a non-user-provided default
389  // constructor, emit the zero initialization now, unless destination is
390  // already zeroed.
391  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
392    switch (E->getConstructionKind()) {
393    case CXXConstructExpr::CK_Delegating:
394    case CXXConstructExpr::CK_Complete:
395      EmitNullInitialization(Dest.getAddr(), E->getType());
396      break;
397    case CXXConstructExpr::CK_VirtualBase:
398    case CXXConstructExpr::CK_NonVirtualBase:
399      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
400      break;
401    }
402  }
403
404  // If this is a call to a trivial default constructor, do nothing.
405  if (CD->isTrivial() && CD->isDefaultConstructor())
406    return;
407
408  // Elide the constructor if we're constructing from a temporary.
409  // The temporary check is required because Sema sets this on NRVO
410  // returns.
411  if (getLangOpts().ElideConstructors && E->isElidable()) {
412    assert(getContext().hasSameUnqualifiedType(E->getType(),
413                                               E->getArg(0)->getType()));
414    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
415      EmitAggExpr(E->getArg(0), Dest);
416      return;
417    }
418  }
419
420  if (const ConstantArrayType *arrayType
421        = getContext().getAsConstantArrayType(E->getType())) {
422    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
423  } else {
424    CXXCtorType Type = Ctor_Complete;
425    bool ForVirtualBase = false;
426    bool Delegating = false;
427
428    switch (E->getConstructionKind()) {
429     case CXXConstructExpr::CK_Delegating:
430      // We should be emitting a constructor; GlobalDecl will assert this
431      Type = CurGD.getCtorType();
432      Delegating = true;
433      break;
434
435     case CXXConstructExpr::CK_Complete:
436      Type = Ctor_Complete;
437      break;
438
439     case CXXConstructExpr::CK_VirtualBase:
440      ForVirtualBase = true;
441      // fall-through
442
443     case CXXConstructExpr::CK_NonVirtualBase:
444      Type = Ctor_Base;
445    }
446
447    // Call the constructor.
448    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
449                           E);
450  }
451}
452
453void
454CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
455                                            llvm::Value *Src,
456                                            const Expr *Exp) {
457  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
458    Exp = E->getSubExpr();
459  assert(isa<CXXConstructExpr>(Exp) &&
460         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
461  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
462  const CXXConstructorDecl *CD = E->getConstructor();
463  RunCleanupsScope Scope(*this);
464
465  // If we require zero initialization before (or instead of) calling the
466  // constructor, as can be the case with a non-user-provided default
467  // constructor, emit the zero initialization now.
468  // FIXME. Do I still need this for a copy ctor synthesis?
469  if (E->requiresZeroInitialization())
470    EmitNullInitialization(Dest, E->getType());
471
472  assert(!getContext().getAsConstantArrayType(E->getType())
473         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
474  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
475}
476
477static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
478                                        const CXXNewExpr *E) {
479  if (!E->isArray())
480    return CharUnits::Zero();
481
482  // No cookie is required if the operator new[] being used is the
483  // reserved placement operator new[].
484  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
485    return CharUnits::Zero();
486
487  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
488}
489
490static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
491                                        const CXXNewExpr *e,
492                                        unsigned minElements,
493                                        llvm::Value *&numElements,
494                                        llvm::Value *&sizeWithoutCookie) {
495  QualType type = e->getAllocatedType();
496
497  if (!e->isArray()) {
498    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
499    sizeWithoutCookie
500      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
501    return sizeWithoutCookie;
502  }
503
504  // The width of size_t.
505  unsigned sizeWidth = CGF.SizeTy->getBitWidth();
506
507  // Figure out the cookie size.
508  llvm::APInt cookieSize(sizeWidth,
509                         CalculateCookiePadding(CGF, e).getQuantity());
510
511  // Emit the array size expression.
512  // We multiply the size of all dimensions for NumElements.
513  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
514  numElements = CGF.EmitScalarExpr(e->getArraySize());
515  assert(isa<llvm::IntegerType>(numElements->getType()));
516
517  // The number of elements can be have an arbitrary integer type;
518  // essentially, we need to multiply it by a constant factor, add a
519  // cookie size, and verify that the result is representable as a
520  // size_t.  That's just a gloss, though, and it's wrong in one
521  // important way: if the count is negative, it's an error even if
522  // the cookie size would bring the total size >= 0.
523  bool isSigned
524    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
525  llvm::IntegerType *numElementsType
526    = cast<llvm::IntegerType>(numElements->getType());
527  unsigned numElementsWidth = numElementsType->getBitWidth();
528
529  // Compute the constant factor.
530  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
531  while (const ConstantArrayType *CAT
532             = CGF.getContext().getAsConstantArrayType(type)) {
533    type = CAT->getElementType();
534    arraySizeMultiplier *= CAT->getSize();
535  }
536
537  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
538  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
539  typeSizeMultiplier *= arraySizeMultiplier;
540
541  // This will be a size_t.
542  llvm::Value *size;
543
544  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
545  // Don't bloat the -O0 code.
546  if (llvm::ConstantInt *numElementsC =
547        dyn_cast<llvm::ConstantInt>(numElements)) {
548    const llvm::APInt &count = numElementsC->getValue();
549
550    bool hasAnyOverflow = false;
551
552    // If 'count' was a negative number, it's an overflow.
553    if (isSigned && count.isNegative())
554      hasAnyOverflow = true;
555
556    // We want to do all this arithmetic in size_t.  If numElements is
557    // wider than that, check whether it's already too big, and if so,
558    // overflow.
559    else if (numElementsWidth > sizeWidth &&
560             numElementsWidth - sizeWidth > count.countLeadingZeros())
561      hasAnyOverflow = true;
562
563    // Okay, compute a count at the right width.
564    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
565
566    // If there is a brace-initializer, we cannot allocate fewer elements than
567    // there are initializers. If we do, that's treated like an overflow.
568    if (adjustedCount.ult(minElements))
569      hasAnyOverflow = true;
570
571    // Scale numElements by that.  This might overflow, but we don't
572    // care because it only overflows if allocationSize does, too, and
573    // if that overflows then we shouldn't use this.
574    numElements = llvm::ConstantInt::get(CGF.SizeTy,
575                                         adjustedCount * arraySizeMultiplier);
576
577    // Compute the size before cookie, and track whether it overflowed.
578    bool overflow;
579    llvm::APInt allocationSize
580      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
581    hasAnyOverflow |= overflow;
582
583    // Add in the cookie, and check whether it's overflowed.
584    if (cookieSize != 0) {
585      // Save the current size without a cookie.  This shouldn't be
586      // used if there was overflow.
587      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
588
589      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
590      hasAnyOverflow |= overflow;
591    }
592
593    // On overflow, produce a -1 so operator new will fail.
594    if (hasAnyOverflow) {
595      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
596    } else {
597      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
598    }
599
600  // Otherwise, we might need to use the overflow intrinsics.
601  } else {
602    // There are up to five conditions we need to test for:
603    // 1) if isSigned, we need to check whether numElements is negative;
604    // 2) if numElementsWidth > sizeWidth, we need to check whether
605    //   numElements is larger than something representable in size_t;
606    // 3) if minElements > 0, we need to check whether numElements is smaller
607    //    than that.
608    // 4) we need to compute
609    //      sizeWithoutCookie := numElements * typeSizeMultiplier
610    //    and check whether it overflows; and
611    // 5) if we need a cookie, we need to compute
612    //      size := sizeWithoutCookie + cookieSize
613    //    and check whether it overflows.
614
615    llvm::Value *hasOverflow = nullptr;
616
617    // If numElementsWidth > sizeWidth, then one way or another, we're
618    // going to have to do a comparison for (2), and this happens to
619    // take care of (1), too.
620    if (numElementsWidth > sizeWidth) {
621      llvm::APInt threshold(numElementsWidth, 1);
622      threshold <<= sizeWidth;
623
624      llvm::Value *thresholdV
625        = llvm::ConstantInt::get(numElementsType, threshold);
626
627      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
628      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
629
630    // Otherwise, if we're signed, we want to sext up to size_t.
631    } else if (isSigned) {
632      if (numElementsWidth < sizeWidth)
633        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
634
635      // If there's a non-1 type size multiplier, then we can do the
636      // signedness check at the same time as we do the multiply
637      // because a negative number times anything will cause an
638      // unsigned overflow.  Otherwise, we have to do it here. But at least
639      // in this case, we can subsume the >= minElements check.
640      if (typeSizeMultiplier == 1)
641        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
642                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
643
644    // Otherwise, zext up to size_t if necessary.
645    } else if (numElementsWidth < sizeWidth) {
646      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
647    }
648
649    assert(numElements->getType() == CGF.SizeTy);
650
651    if (minElements) {
652      // Don't allow allocation of fewer elements than we have initializers.
653      if (!hasOverflow) {
654        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
655                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
656      } else if (numElementsWidth > sizeWidth) {
657        // The other existing overflow subsumes this check.
658        // We do an unsigned comparison, since any signed value < -1 is
659        // taken care of either above or below.
660        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
661                          CGF.Builder.CreateICmpULT(numElements,
662                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
663      }
664    }
665
666    size = numElements;
667
668    // Multiply by the type size if necessary.  This multiplier
669    // includes all the factors for nested arrays.
670    //
671    // This step also causes numElements to be scaled up by the
672    // nested-array factor if necessary.  Overflow on this computation
673    // can be ignored because the result shouldn't be used if
674    // allocation fails.
675    if (typeSizeMultiplier != 1) {
676      llvm::Value *umul_with_overflow
677        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
678
679      llvm::Value *tsmV =
680        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
681      llvm::Value *result =
682        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
683
684      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
685      if (hasOverflow)
686        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
687      else
688        hasOverflow = overflowed;
689
690      size = CGF.Builder.CreateExtractValue(result, 0);
691
692      // Also scale up numElements by the array size multiplier.
693      if (arraySizeMultiplier != 1) {
694        // If the base element type size is 1, then we can re-use the
695        // multiply we just did.
696        if (typeSize.isOne()) {
697          assert(arraySizeMultiplier == typeSizeMultiplier);
698          numElements = size;
699
700        // Otherwise we need a separate multiply.
701        } else {
702          llvm::Value *asmV =
703            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
704          numElements = CGF.Builder.CreateMul(numElements, asmV);
705        }
706      }
707    } else {
708      // numElements doesn't need to be scaled.
709      assert(arraySizeMultiplier == 1);
710    }
711
712    // Add in the cookie size if necessary.
713    if (cookieSize != 0) {
714      sizeWithoutCookie = size;
715
716      llvm::Value *uadd_with_overflow
717        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
718
719      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
720      llvm::Value *result =
721        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
722
723      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
724      if (hasOverflow)
725        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
726      else
727        hasOverflow = overflowed;
728
729      size = CGF.Builder.CreateExtractValue(result, 0);
730    }
731
732    // If we had any possibility of dynamic overflow, make a select to
733    // overwrite 'size' with an all-ones value, which should cause
734    // operator new to throw.
735    if (hasOverflow)
736      size = CGF.Builder.CreateSelect(hasOverflow,
737                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
738                                      size);
739  }
740
741  if (cookieSize == 0)
742    sizeWithoutCookie = size;
743  else
744    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
745
746  return size;
747}
748
749static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
750                                    QualType AllocType, llvm::Value *NewPtr) {
751  // FIXME: Refactor with EmitExprAsInit.
752  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
753  switch (CGF.getEvaluationKind(AllocType)) {
754  case TEK_Scalar:
755    CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType,
756                                                         Alignment),
757                       false);
758    return;
759  case TEK_Complex:
760    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
761                                                           Alignment),
762                                  /*isInit*/ true);
763    return;
764  case TEK_Aggregate: {
765    AggValueSlot Slot
766      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
767                              AggValueSlot::IsDestructed,
768                              AggValueSlot::DoesNotNeedGCBarriers,
769                              AggValueSlot::IsNotAliased);
770    CGF.EmitAggExpr(Init, Slot);
771    return;
772  }
773  }
774  llvm_unreachable("bad evaluation kind");
775}
776
777void
778CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
779                                         QualType ElementType,
780                                         llvm::Value *BeginPtr,
781                                         llvm::Value *NumElements,
782                                         llvm::Value *AllocSizeWithoutCookie) {
783  // If we have a type with trivial initialization and no initializer,
784  // there's nothing to do.
785  if (!E->hasInitializer())
786    return;
787
788  llvm::Value *CurPtr = BeginPtr;
789
790  unsigned InitListElements = 0;
791
792  const Expr *Init = E->getInitializer();
793  llvm::AllocaInst *EndOfInit = nullptr;
794  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
795  EHScopeStack::stable_iterator Cleanup;
796  llvm::Instruction *CleanupDominator = nullptr;
797
798  // If the initializer is an initializer list, first do the explicit elements.
799  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
800    InitListElements = ILE->getNumInits();
801
802    // If this is a multi-dimensional array new, we will initialize multiple
803    // elements with each init list element.
804    QualType AllocType = E->getAllocatedType();
805    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
806            AllocType->getAsArrayTypeUnsafe())) {
807      unsigned AS = CurPtr->getType()->getPointerAddressSpace();
808      llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(AS);
809      CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
810      InitListElements *= getContext().getConstantArrayElementCount(CAT);
811    }
812
813    // Enter a partial-destruction Cleanup if necessary.
814    if (needsEHCleanup(DtorKind)) {
815      // In principle we could tell the Cleanup where we are more
816      // directly, but the control flow can get so varied here that it
817      // would actually be quite complex.  Therefore we go through an
818      // alloca.
819      EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
820      CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
821      pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
822                                       getDestroyer(DtorKind));
823      Cleanup = EHStack.stable_begin();
824    }
825
826    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
827      // Tell the cleanup that it needs to destroy up to this
828      // element.  TODO: some of these stores can be trivially
829      // observed to be unnecessary.
830      if (EndOfInit)
831        Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
832                            EndOfInit);
833      // FIXME: If the last initializer is an incomplete initializer list for
834      // an array, and we have an array filler, we can fold together the two
835      // initialization loops.
836      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
837                              ILE->getInit(i)->getType(), CurPtr);
838      CurPtr = Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.exp.next");
839    }
840
841    // The remaining elements are filled with the array filler expression.
842    Init = ILE->getArrayFiller();
843
844    // Extract the initializer for the individual array elements by pulling
845    // out the array filler from all the nested initializer lists. This avoids
846    // generating a nested loop for the initialization.
847    while (Init && Init->getType()->isConstantArrayType()) {
848      auto *SubILE = dyn_cast<InitListExpr>(Init);
849      if (!SubILE)
850        break;
851      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
852      Init = SubILE->getArrayFiller();
853    }
854
855    // Switch back to initializing one base element at a time.
856    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
857  }
858
859  // Attempt to perform zero-initialization using memset.
860  auto TryMemsetInitialization = [&]() -> bool {
861    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
862    // we can initialize with a memset to -1.
863    if (!CGM.getTypes().isZeroInitializable(ElementType))
864      return false;
865
866    // Optimization: since zero initialization will just set the memory
867    // to all zeroes, generate a single memset to do it in one shot.
868
869    // Subtract out the size of any elements we've already initialized.
870    auto *RemainingSize = AllocSizeWithoutCookie;
871    if (InitListElements) {
872      // We know this can't overflow; we check this when doing the allocation.
873      auto *InitializedSize = llvm::ConstantInt::get(
874          RemainingSize->getType(),
875          getContext().getTypeSizeInChars(ElementType).getQuantity() *
876              InitListElements);
877      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
878    }
879
880    // Create the memset.
881    CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
882    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
883                         Alignment.getQuantity(), false);
884    return true;
885  };
886
887  // If all elements have already been initialized, skip any further
888  // initialization.
889  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
890  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
891    // If there was a Cleanup, deactivate it.
892    if (CleanupDominator)
893      DeactivateCleanupBlock(Cleanup, CleanupDominator);
894    return;
895  }
896
897  assert(Init && "have trailing elements to initialize but no initializer");
898
899  // If this is a constructor call, try to optimize it out, and failing that
900  // emit a single loop to initialize all remaining elements.
901  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
902    CXXConstructorDecl *Ctor = CCE->getConstructor();
903    if (Ctor->isTrivial()) {
904      // If new expression did not specify value-initialization, then there
905      // is no initialization.
906      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
907        return;
908
909      if (TryMemsetInitialization())
910        return;
911    }
912
913    // Store the new Cleanup position for irregular Cleanups.
914    //
915    // FIXME: Share this cleanup with the constructor call emission rather than
916    // having it create a cleanup of its own.
917    if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
918
919    // Emit a constructor call loop to initialize the remaining elements.
920    if (InitListElements)
921      NumElements = Builder.CreateSub(
922          NumElements,
923          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
924    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
925                               CCE->requiresZeroInitialization());
926    return;
927  }
928
929  // If this is value-initialization, we can usually use memset.
930  ImplicitValueInitExpr IVIE(ElementType);
931  if (isa<ImplicitValueInitExpr>(Init)) {
932    if (TryMemsetInitialization())
933      return;
934
935    // Switch to an ImplicitValueInitExpr for the element type. This handles
936    // only one case: multidimensional array new of pointers to members. In
937    // all other cases, we already have an initializer for the array element.
938    Init = &IVIE;
939  }
940
941  // At this point we should have found an initializer for the individual
942  // elements of the array.
943  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
944         "got wrong type of element to initialize");
945
946  // If we have an empty initializer list, we can usually use memset.
947  if (auto *ILE = dyn_cast<InitListExpr>(Init))
948    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
949      return;
950
951  // Create the loop blocks.
952  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
953  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
954  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
955
956  // Find the end of the array, hoisted out of the loop.
957  llvm::Value *EndPtr =
958    Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");
959
960  // If the number of elements isn't constant, we have to now check if there is
961  // anything left to initialize.
962  if (!ConstNum) {
963    llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
964                                                "array.isempty");
965    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
966  }
967
968  // Enter the loop.
969  EmitBlock(LoopBB);
970
971  // Set up the current-element phi.
972  llvm::PHINode *CurPtrPhi =
973    Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
974  CurPtrPhi->addIncoming(CurPtr, EntryBB);
975  CurPtr = CurPtrPhi;
976
977  // Store the new Cleanup position for irregular Cleanups.
978  if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
979
980  // Enter a partial-destruction Cleanup if necessary.
981  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
982    pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
983                                   getDestroyer(DtorKind));
984    Cleanup = EHStack.stable_begin();
985    CleanupDominator = Builder.CreateUnreachable();
986  }
987
988  // Emit the initializer into this element.
989  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);
990
991  // Leave the Cleanup if we entered one.
992  if (CleanupDominator) {
993    DeactivateCleanupBlock(Cleanup, CleanupDominator);
994    CleanupDominator->eraseFromParent();
995  }
996
997  // Advance to the next element by adjusting the pointer type as necessary.
998  llvm::Value *NextPtr =
999      Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.next");
1000
1001  // Check whether we've gotten to the end of the array and, if so,
1002  // exit the loop.
1003  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
1004  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
1005  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
1006
1007  EmitBlock(ContBB);
1008}
1009
1010static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1011                               QualType ElementType,
1012                               llvm::Value *NewPtr,
1013                               llvm::Value *NumElements,
1014                               llvm::Value *AllocSizeWithoutCookie) {
1015  if (E->isArray())
1016    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements,
1017                                AllocSizeWithoutCookie);
1018  else if (const Expr *Init = E->getInitializer())
1019    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
1020}
1021
1022/// Emit a call to an operator new or operator delete function, as implicitly
1023/// created by new-expressions and delete-expressions.
1024static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1025                                const FunctionDecl *Callee,
1026                                const FunctionProtoType *CalleeType,
1027                                const CallArgList &Args) {
1028  llvm::Instruction *CallOrInvoke;
1029  llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
1030  RValue RV =
1031      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
1032                   CalleeAddr, ReturnValueSlot(), Args,
1033                   Callee, &CallOrInvoke);
1034
1035  /// C++1y [expr.new]p10:
1036  ///   [In a new-expression,] an implementation is allowed to omit a call
1037  ///   to a replaceable global allocation function.
1038  ///
1039  /// We model such elidable calls with the 'builtin' attribute.
1040  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
1041  if (Callee->isReplaceableGlobalAllocationFunction() &&
1042      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
1043    // FIXME: Add addAttribute to CallSite.
1044    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
1045      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
1046                       llvm::Attribute::Builtin);
1047    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
1048      II->addAttribute(llvm::AttributeSet::FunctionIndex,
1049                       llvm::Attribute::Builtin);
1050    else
1051      llvm_unreachable("unexpected kind of call instruction");
1052  }
1053
1054  return RV;
1055}
1056
1057RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1058                                                 const Expr *Arg,
1059                                                 bool IsDelete) {
1060  CallArgList Args;
1061  const Stmt *ArgS = Arg;
1062  EmitCallArgs(Args, *Type->param_type_begin(),
1063               ConstExprIterator(&ArgS), ConstExprIterator(&ArgS + 1));
1064  // Find the allocation or deallocation function that we're calling.
1065  ASTContext &Ctx = getContext();
1066  DeclarationName Name = Ctx.DeclarationNames
1067      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
1068  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1069    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
1070      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
1071        return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
1072  llvm_unreachable("predeclared global operator new/delete is missing");
1073}
1074
1075namespace {
1076  /// A cleanup to call the given 'operator delete' function upon
1077  /// abnormal exit from a new expression.
1078  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
1079    size_t NumPlacementArgs;
1080    const FunctionDecl *OperatorDelete;
1081    llvm::Value *Ptr;
1082    llvm::Value *AllocSize;
1083
1084    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1085
1086  public:
1087    static size_t getExtraSize(size_t NumPlacementArgs) {
1088      return NumPlacementArgs * sizeof(RValue);
1089    }
1090
1091    CallDeleteDuringNew(size_t NumPlacementArgs,
1092                        const FunctionDecl *OperatorDelete,
1093                        llvm::Value *Ptr,
1094                        llvm::Value *AllocSize)
1095      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1096        Ptr(Ptr), AllocSize(AllocSize) {}
1097
1098    void setPlacementArg(unsigned I, RValue Arg) {
1099      assert(I < NumPlacementArgs && "index out of range");
1100      getPlacementArgs()[I] = Arg;
1101    }
1102
1103    void Emit(CodeGenFunction &CGF, Flags flags) override {
1104      const FunctionProtoType *FPT
1105        = OperatorDelete->getType()->getAs<FunctionProtoType>();
1106      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
1107             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
1108
1109      CallArgList DeleteArgs;
1110
1111      // The first argument is always a void*.
1112      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
1113      DeleteArgs.add(RValue::get(Ptr), *AI++);
1114
1115      // A member 'operator delete' can take an extra 'size_t' argument.
1116      if (FPT->getNumParams() == NumPlacementArgs + 2)
1117        DeleteArgs.add(RValue::get(AllocSize), *AI++);
1118
1119      // Pass the rest of the arguments, which must match exactly.
1120      for (unsigned I = 0; I != NumPlacementArgs; ++I)
1121        DeleteArgs.add(getPlacementArgs()[I], *AI++);
1122
1123      // Call 'operator delete'.
1124      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1125    }
1126  };
1127
1128  /// A cleanup to call the given 'operator delete' function upon
1129  /// abnormal exit from a new expression when the new expression is
1130  /// conditional.
1131  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
1132    size_t NumPlacementArgs;
1133    const FunctionDecl *OperatorDelete;
1134    DominatingValue<RValue>::saved_type Ptr;
1135    DominatingValue<RValue>::saved_type AllocSize;
1136
1137    DominatingValue<RValue>::saved_type *getPlacementArgs() {
1138      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1139    }
1140
1141  public:
1142    static size_t getExtraSize(size_t NumPlacementArgs) {
1143      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1144    }
1145
1146    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1147                                   const FunctionDecl *OperatorDelete,
1148                                   DominatingValue<RValue>::saved_type Ptr,
1149                              DominatingValue<RValue>::saved_type AllocSize)
1150      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1151        Ptr(Ptr), AllocSize(AllocSize) {}
1152
1153    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1154      assert(I < NumPlacementArgs && "index out of range");
1155      getPlacementArgs()[I] = Arg;
1156    }
1157
1158    void Emit(CodeGenFunction &CGF, Flags flags) override {
1159      const FunctionProtoType *FPT
1160        = OperatorDelete->getType()->getAs<FunctionProtoType>();
1161      assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
1162             (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
1163
1164      CallArgList DeleteArgs;
1165
1166      // The first argument is always a void*.
1167      FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
1168      DeleteArgs.add(Ptr.restore(CGF), *AI++);
1169
1170      // A member 'operator delete' can take an extra 'size_t' argument.
1171      if (FPT->getNumParams() == NumPlacementArgs + 2) {
1172        RValue RV = AllocSize.restore(CGF);
1173        DeleteArgs.add(RV, *AI++);
1174      }
1175
1176      // Pass the rest of the arguments, which must match exactly.
1177      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1178        RValue RV = getPlacementArgs()[I].restore(CGF);
1179        DeleteArgs.add(RV, *AI++);
1180      }
1181
1182      // Call 'operator delete'.
1183      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1184    }
1185  };
1186}
1187
1188/// Enter a cleanup to call 'operator delete' if the initializer in a
1189/// new-expression throws.
1190static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1191                                  const CXXNewExpr *E,
1192                                  llvm::Value *NewPtr,
1193                                  llvm::Value *AllocSize,
1194                                  const CallArgList &NewArgs) {
1195  // If we're not inside a conditional branch, then the cleanup will
1196  // dominate and we can do the easier (and more efficient) thing.
1197  if (!CGF.isInConditionalBranch()) {
1198    CallDeleteDuringNew *Cleanup = CGF.EHStack
1199      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1200                                                 E->getNumPlacementArgs(),
1201                                                 E->getOperatorDelete(),
1202                                                 NewPtr, AllocSize);
1203    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1204      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1205
1206    return;
1207  }
1208
1209  // Otherwise, we need to save all this stuff.
1210  DominatingValue<RValue>::saved_type SavedNewPtr =
1211    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
1212  DominatingValue<RValue>::saved_type SavedAllocSize =
1213    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1214
1215  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1216    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1217                                                 E->getNumPlacementArgs(),
1218                                                 E->getOperatorDelete(),
1219                                                 SavedNewPtr,
1220                                                 SavedAllocSize);
1221  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1222    Cleanup->setPlacementArg(I,
1223                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1224
1225  CGF.initFullExprCleanup();
1226}
1227
1228llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1229  // The element type being allocated.
1230  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1231
1232  // 1. Build a call to the allocation function.
1233  FunctionDecl *allocator = E->getOperatorNew();
1234  const FunctionProtoType *allocatorType =
1235    allocator->getType()->castAs<FunctionProtoType>();
1236
1237  CallArgList allocatorArgs;
1238
1239  // The allocation size is the first argument.
1240  QualType sizeType = getContext().getSizeType();
1241
1242  // If there is a brace-initializer, cannot allocate fewer elements than inits.
1243  unsigned minElements = 0;
1244  if (E->isArray() && E->hasInitializer()) {
1245    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1246      minElements = ILE->getNumInits();
1247  }
1248
1249  llvm::Value *numElements = nullptr;
1250  llvm::Value *allocSizeWithoutCookie = nullptr;
1251  llvm::Value *allocSize =
1252    EmitCXXNewAllocSize(*this, E, minElements, numElements,
1253                        allocSizeWithoutCookie);
1254
1255  allocatorArgs.add(RValue::get(allocSize), sizeType);
1256
1257  // We start at 1 here because the first argument (the allocation size)
1258  // has already been emitted.
1259  EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
1260               E->placement_arg_end(), /* CalleeDecl */ nullptr,
1261               /*ParamsToSkip*/ 1);
1262
1263  // Emit the allocation call.  If the allocator is a global placement
1264  // operator, just "inline" it directly.
1265  RValue RV;
1266  if (allocator->isReservedGlobalPlacementOperator()) {
1267    assert(allocatorArgs.size() == 2);
1268    RV = allocatorArgs[1].RV;
1269    // TODO: kill any unnecessary computations done for the size
1270    // argument.
1271  } else {
1272    RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1273  }
1274
1275  // Emit a null check on the allocation result if the allocation
1276  // function is allowed to return null (because it has a non-throwing
1277  // exception spec; for this part, we inline
1278  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1279  // interesting initializer.
1280  bool nullCheck = allocatorType->isNothrow(getContext()) &&
1281    (!allocType.isPODType(getContext()) || E->hasInitializer());
1282
1283  llvm::BasicBlock *nullCheckBB = nullptr;
1284  llvm::BasicBlock *contBB = nullptr;
1285
1286  llvm::Value *allocation = RV.getScalarVal();
1287  unsigned AS = allocation->getType()->getPointerAddressSpace();
1288
1289  // The null-check means that the initializer is conditionally
1290  // evaluated.
1291  ConditionalEvaluation conditional(*this);
1292
1293  if (nullCheck) {
1294    conditional.begin(*this);
1295
1296    nullCheckBB = Builder.GetInsertBlock();
1297    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1298    contBB = createBasicBlock("new.cont");
1299
1300    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1301    Builder.CreateCondBr(isNull, contBB, notNullBB);
1302    EmitBlock(notNullBB);
1303  }
1304
1305  // If there's an operator delete, enter a cleanup to call it if an
1306  // exception is thrown.
1307  EHScopeStack::stable_iterator operatorDeleteCleanup;
1308  llvm::Instruction *cleanupDominator = nullptr;
1309  if (E->getOperatorDelete() &&
1310      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1311    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1312    operatorDeleteCleanup = EHStack.stable_begin();
1313    cleanupDominator = Builder.CreateUnreachable();
1314  }
1315
1316  assert((allocSize == allocSizeWithoutCookie) ==
1317         CalculateCookiePadding(*this, E).isZero());
1318  if (allocSize != allocSizeWithoutCookie) {
1319    assert(E->isArray());
1320    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1321                                                       numElements,
1322                                                       E, allocType);
1323  }
1324
1325  llvm::Type *elementPtrTy
1326    = ConvertTypeForMem(allocType)->getPointerTo(AS);
1327  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1328
1329  EmitNewInitializer(*this, E, allocType, result, numElements,
1330                     allocSizeWithoutCookie);
1331  if (E->isArray()) {
1332    // NewPtr is a pointer to the base element type.  If we're
1333    // allocating an array of arrays, we'll need to cast back to the
1334    // array pointer type.
1335    llvm::Type *resultType = ConvertTypeForMem(E->getType());
1336    if (result->getType() != resultType)
1337      result = Builder.CreateBitCast(result, resultType);
1338  }
1339
1340  // Deactivate the 'operator delete' cleanup if we finished
1341  // initialization.
1342  if (operatorDeleteCleanup.isValid()) {
1343    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1344    cleanupDominator->eraseFromParent();
1345  }
1346
1347  if (nullCheck) {
1348    conditional.end(*this);
1349
1350    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1351    EmitBlock(contBB);
1352
1353    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1354    PHI->addIncoming(result, notNullBB);
1355    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1356                     nullCheckBB);
1357
1358    result = PHI;
1359  }
1360
1361  return result;
1362}
1363
1364void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1365                                     llvm::Value *Ptr,
1366                                     QualType DeleteTy) {
1367  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1368
1369  const FunctionProtoType *DeleteFTy =
1370    DeleteFD->getType()->getAs<FunctionProtoType>();
1371
1372  CallArgList DeleteArgs;
1373
1374  // Check if we need to pass the size to the delete operator.
1375  llvm::Value *Size = nullptr;
1376  QualType SizeTy;
1377  if (DeleteFTy->getNumParams() == 2) {
1378    SizeTy = DeleteFTy->getParamType(1);
1379    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1380    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1381                                  DeleteTypeSize.getQuantity());
1382  }
1383
1384  QualType ArgTy = DeleteFTy->getParamType(0);
1385  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1386  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1387
1388  if (Size)
1389    DeleteArgs.add(RValue::get(Size), SizeTy);
1390
1391  // Emit the call to delete.
1392  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1393}
1394
1395namespace {
1396  /// Calls the given 'operator delete' on a single object.
1397  struct CallObjectDelete : EHScopeStack::Cleanup {
1398    llvm::Value *Ptr;
1399    const FunctionDecl *OperatorDelete;
1400    QualType ElementType;
1401
1402    CallObjectDelete(llvm::Value *Ptr,
1403                     const FunctionDecl *OperatorDelete,
1404                     QualType ElementType)
1405      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1406
1407    void Emit(CodeGenFunction &CGF, Flags flags) override {
1408      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1409    }
1410  };
1411}
1412
1413void
1414CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1415                                             llvm::Value *CompletePtr,
1416                                             QualType ElementType) {
1417  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1418                                        OperatorDelete, ElementType);
1419}
1420
1421/// Emit the code for deleting a single object.
1422static void EmitObjectDelete(CodeGenFunction &CGF,
1423                             const CXXDeleteExpr *DE,
1424                             llvm::Value *Ptr,
1425                             QualType ElementType) {
1426  // Find the destructor for the type, if applicable.  If the
1427  // destructor is virtual, we'll just emit the vcall and return.
1428  const CXXDestructorDecl *Dtor = nullptr;
1429  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1430    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1431    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1432      Dtor = RD->getDestructor();
1433
1434      if (Dtor->isVirtual()) {
1435        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1436                                                    Dtor);
1437        return;
1438      }
1439    }
1440  }
1441
1442  // Make sure that we call delete even if the dtor throws.
1443  // This doesn't have to a conditional cleanup because we're going
1444  // to pop it off in a second.
1445  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1446  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1447                                            Ptr, OperatorDelete, ElementType);
1448
1449  if (Dtor)
1450    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1451                              /*ForVirtualBase=*/false,
1452                              /*Delegating=*/false,
1453                              Ptr);
1454  else if (CGF.getLangOpts().ObjCAutoRefCount &&
1455           ElementType->isObjCLifetimeType()) {
1456    switch (ElementType.getObjCLifetime()) {
1457    case Qualifiers::OCL_None:
1458    case Qualifiers::OCL_ExplicitNone:
1459    case Qualifiers::OCL_Autoreleasing:
1460      break;
1461
1462    case Qualifiers::OCL_Strong: {
1463      // Load the pointer value.
1464      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1465                                             ElementType.isVolatileQualified());
1466
1467      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
1468      break;
1469    }
1470
1471    case Qualifiers::OCL_Weak:
1472      CGF.EmitARCDestroyWeak(Ptr);
1473      break;
1474    }
1475  }
1476
1477  CGF.PopCleanupBlock();
1478}
1479
1480namespace {
1481  /// Calls the given 'operator delete' on an array of objects.
1482  struct CallArrayDelete : EHScopeStack::Cleanup {
1483    llvm::Value *Ptr;
1484    const FunctionDecl *OperatorDelete;
1485    llvm::Value *NumElements;
1486    QualType ElementType;
1487    CharUnits CookieSize;
1488
1489    CallArrayDelete(llvm::Value *Ptr,
1490                    const FunctionDecl *OperatorDelete,
1491                    llvm::Value *NumElements,
1492                    QualType ElementType,
1493                    CharUnits CookieSize)
1494      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1495        ElementType(ElementType), CookieSize(CookieSize) {}
1496
1497    void Emit(CodeGenFunction &CGF, Flags flags) override {
1498      const FunctionProtoType *DeleteFTy =
1499        OperatorDelete->getType()->getAs<FunctionProtoType>();
1500      assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
1501
1502      CallArgList Args;
1503
1504      // Pass the pointer as the first argument.
1505      QualType VoidPtrTy = DeleteFTy->getParamType(0);
1506      llvm::Value *DeletePtr
1507        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1508      Args.add(RValue::get(DeletePtr), VoidPtrTy);
1509
1510      // Pass the original requested size as the second argument.
1511      if (DeleteFTy->getNumParams() == 2) {
1512        QualType size_t = DeleteFTy->getParamType(1);
1513        llvm::IntegerType *SizeTy
1514          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1515
1516        CharUnits ElementTypeSize =
1517          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1518
1519        // The size of an element, multiplied by the number of elements.
1520        llvm::Value *Size
1521          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1522        Size = CGF.Builder.CreateMul(Size, NumElements);
1523
1524        // Plus the size of the cookie if applicable.
1525        if (!CookieSize.isZero()) {
1526          llvm::Value *CookieSizeV
1527            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1528          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1529        }
1530
1531        Args.add(RValue::get(Size), size_t);
1532      }
1533
1534      // Emit the call to delete.
1535      EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
1536    }
1537  };
1538}
1539
1540/// Emit the code for deleting an array of objects.
1541static void EmitArrayDelete(CodeGenFunction &CGF,
1542                            const CXXDeleteExpr *E,
1543                            llvm::Value *deletedPtr,
1544                            QualType elementType) {
1545  llvm::Value *numElements = nullptr;
1546  llvm::Value *allocatedPtr = nullptr;
1547  CharUnits cookieSize;
1548  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1549                                      numElements, allocatedPtr, cookieSize);
1550
1551  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1552
1553  // Make sure that we call delete even if one of the dtors throws.
1554  const FunctionDecl *operatorDelete = E->getOperatorDelete();
1555  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1556                                           allocatedPtr, operatorDelete,
1557                                           numElements, elementType,
1558                                           cookieSize);
1559
1560  // Destroy the elements.
1561  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1562    assert(numElements && "no element count for a type with a destructor!");
1563
1564    llvm::Value *arrayEnd =
1565      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
1566
1567    // Note that it is legal to allocate a zero-length array, and we
1568    // can never fold the check away because the length should always
1569    // come from a cookie.
1570    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
1571                         CGF.getDestroyer(dtorKind),
1572                         /*checkZeroLength*/ true,
1573                         CGF.needsEHCleanup(dtorKind));
1574  }
1575
1576  // Pop the cleanup block.
1577  CGF.PopCleanupBlock();
1578}
1579
1580void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1581  const Expr *Arg = E->getArgument();
1582  llvm::Value *Ptr = EmitScalarExpr(Arg);
1583
1584  // Null check the pointer.
1585  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1586  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1587
1588  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1589
1590  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1591  EmitBlock(DeleteNotNull);
1592
1593  // We might be deleting a pointer to array.  If so, GEP down to the
1594  // first non-array element.
1595  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1596  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1597  if (DeleteTy->isConstantArrayType()) {
1598    llvm::Value *Zero = Builder.getInt32(0);
1599    SmallVector<llvm::Value*,8> GEP;
1600
1601    GEP.push_back(Zero); // point at the outermost array
1602
1603    // For each layer of array type we're pointing at:
1604    while (const ConstantArrayType *Arr
1605             = getContext().getAsConstantArrayType(DeleteTy)) {
1606      // 1. Unpeel the array type.
1607      DeleteTy = Arr->getElementType();
1608
1609      // 2. GEP to the first element of the array.
1610      GEP.push_back(Zero);
1611    }
1612
1613    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
1614  }
1615
1616  assert(ConvertTypeForMem(DeleteTy) ==
1617         cast<llvm::PointerType>(Ptr->getType())->getElementType());
1618
1619  if (E->isArrayForm()) {
1620    EmitArrayDelete(*this, E, Ptr, DeleteTy);
1621  } else {
1622    EmitObjectDelete(*this, E, Ptr, DeleteTy);
1623  }
1624
1625  EmitBlock(DeleteEnd);
1626}
1627
1628static bool isGLValueFromPointerDeref(const Expr *E) {
1629  E = E->IgnoreParens();
1630
1631  if (const auto *CE = dyn_cast<CastExpr>(E)) {
1632    if (!CE->getSubExpr()->isGLValue())
1633      return false;
1634    return isGLValueFromPointerDeref(CE->getSubExpr());
1635  }
1636
1637  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
1638    return isGLValueFromPointerDeref(OVE->getSourceExpr());
1639
1640  if (const auto *BO = dyn_cast<BinaryOperator>(E))
1641    if (BO->getOpcode() == BO_Comma)
1642      return isGLValueFromPointerDeref(BO->getRHS());
1643
1644  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
1645    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
1646           isGLValueFromPointerDeref(ACO->getFalseExpr());
1647
1648  // C++11 [expr.sub]p1:
1649  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
1650  if (isa<ArraySubscriptExpr>(E))
1651    return true;
1652
1653  if (const auto *UO = dyn_cast<UnaryOperator>(E))
1654    if (UO->getOpcode() == UO_Deref)
1655      return true;
1656
1657  return false;
1658}
1659
1660static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
1661                                         llvm::Type *StdTypeInfoPtrTy) {
1662  // Get the vtable pointer.
1663  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
1664
1665  // C++ [expr.typeid]p2:
1666  //   If the glvalue expression is obtained by applying the unary * operator to
1667  //   a pointer and the pointer is a null pointer value, the typeid expression
1668  //   throws the std::bad_typeid exception.
1669  //
1670  // However, this paragraph's intent is not clear.  We choose a very generous
1671  // interpretation which implores us to consider comma operators, conditional
1672  // operators, parentheses and other such constructs.
1673  QualType SrcRecordTy = E->getType();
1674  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
1675          isGLValueFromPointerDeref(E), SrcRecordTy)) {
1676    llvm::BasicBlock *BadTypeidBlock =
1677        CGF.createBasicBlock("typeid.bad_typeid");
1678    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
1679
1680    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
1681    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1682
1683    CGF.EmitBlock(BadTypeidBlock);
1684    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
1685    CGF.EmitBlock(EndBlock);
1686  }
1687
1688  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
1689                                        StdTypeInfoPtrTy);
1690}
1691
1692llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1693  llvm::Type *StdTypeInfoPtrTy =
1694    ConvertType(E->getType())->getPointerTo();
1695
1696  if (E->isTypeOperand()) {
1697    llvm::Constant *TypeInfo =
1698        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
1699    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1700  }
1701
1702  // C++ [expr.typeid]p2:
1703  //   When typeid is applied to a glvalue expression whose type is a
1704  //   polymorphic class type, the result refers to a std::type_info object
1705  //   representing the type of the most derived object (that is, the dynamic
1706  //   type) to which the glvalue refers.
1707  if (E->isPotentiallyEvaluated())
1708    return EmitTypeidFromVTable(*this, E->getExprOperand(),
1709                                StdTypeInfoPtrTy);
1710
1711  QualType OperandTy = E->getExprOperand()->getType();
1712  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1713                               StdTypeInfoPtrTy);
1714}
1715
1716static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1717                                          QualType DestTy) {
1718  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1719  if (DestTy->isPointerType())
1720    return llvm::Constant::getNullValue(DestLTy);
1721
1722  /// C++ [expr.dynamic.cast]p9:
1723  ///   A failed cast to reference type throws std::bad_cast
1724  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
1725    return nullptr;
1726
1727  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1728  return llvm::UndefValue::get(DestLTy);
1729}
1730
1731llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1732                                              const CXXDynamicCastExpr *DCE) {
1733  QualType DestTy = DCE->getTypeAsWritten();
1734
1735  if (DCE->isAlwaysNull())
1736    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
1737      return T;
1738
1739  QualType SrcTy = DCE->getSubExpr()->getType();
1740
1741  // C++ [expr.dynamic.cast]p7:
1742  //   If T is "pointer to cv void," then the result is a pointer to the most
1743  //   derived object pointed to by v.
1744  const PointerType *DestPTy = DestTy->getAs<PointerType>();
1745
1746  bool isDynamicCastToVoid;
1747  QualType SrcRecordTy;
1748  QualType DestRecordTy;
1749  if (DestPTy) {
1750    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
1751    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1752    DestRecordTy = DestPTy->getPointeeType();
1753  } else {
1754    isDynamicCastToVoid = false;
1755    SrcRecordTy = SrcTy;
1756    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1757  }
1758
1759  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1760
1761  // C++ [expr.dynamic.cast]p4:
1762  //   If the value of v is a null pointer value in the pointer case, the result
1763  //   is the null pointer value of type T.
1764  bool ShouldNullCheckSrcValue =
1765      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
1766                                                         SrcRecordTy);
1767
1768  llvm::BasicBlock *CastNull = nullptr;
1769  llvm::BasicBlock *CastNotNull = nullptr;
1770  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1771
1772  if (ShouldNullCheckSrcValue) {
1773    CastNull = createBasicBlock("dynamic_cast.null");
1774    CastNotNull = createBasicBlock("dynamic_cast.notnull");
1775
1776    llvm::Value *IsNull = Builder.CreateIsNull(Value);
1777    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1778    EmitBlock(CastNotNull);
1779  }
1780
1781  if (isDynamicCastToVoid) {
1782    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
1783                                                  DestTy);
1784  } else {
1785    assert(DestRecordTy->isRecordType() &&
1786           "destination type must be a record type!");
1787    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
1788                                                DestTy, DestRecordTy, CastEnd);
1789  }
1790
1791  if (ShouldNullCheckSrcValue) {
1792    EmitBranch(CastEnd);
1793
1794    EmitBlock(CastNull);
1795    EmitBranch(CastEnd);
1796  }
1797
1798  EmitBlock(CastEnd);
1799
1800  if (ShouldNullCheckSrcValue) {
1801    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1802    PHI->addIncoming(Value, CastNotNull);
1803    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1804
1805    Value = PHI;
1806  }
1807
1808  return Value;
1809}
1810
1811void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1812  RunCleanupsScope Scope(*this);
1813  LValue SlotLV =
1814      MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
1815
1816  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1817  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
1818                                         e = E->capture_init_end();
1819       i != e; ++i, ++CurField) {
1820    // Emit initialization
1821    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1822    if (CurField->hasCapturedVLAType()) {
1823      auto VAT = CurField->getCapturedVLAType();
1824      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
1825    } else {
1826      ArrayRef<VarDecl *> ArrayIndexes;
1827      if (CurField->getType()->isArrayType())
1828        ArrayIndexes = E->getCaptureInitIndexVars(i);
1829      EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1830    }
1831  }
1832}
1833