CGExprCXX.cpp revision b924124316becf2968a37dab36d0c48ea167666f
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
23RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
24                                          llvm::Value *Callee,
25                                          ReturnValueSlot ReturnValue,
26                                          llvm::Value *This,
27                                          llvm::Value *VTT,
28                                          CallExpr::const_arg_iterator ArgBeg,
29                                          CallExpr::const_arg_iterator ArgEnd) {
30  assert(MD->isInstance() &&
31         "Trying to emit a member call expr on a static method!");
32
33  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
34
35  CallArgList Args;
36
37  // Push the this ptr.
38  Args.push_back(std::make_pair(RValue::get(This),
39                                MD->getThisType(getContext())));
40
41  // If there is a VTT parameter, emit it.
42  if (VTT) {
43    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44    Args.push_back(std::make_pair(RValue::get(VTT), T));
45  }
46
47  // And the rest of the call args
48  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
49
50  QualType ResultType = FPT->getResultType();
51  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
52                                                 FPT->getExtInfo()),
53                  Callee, ReturnValue, Args, MD);
54}
55
56static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
57  const Expr *E = Base;
58
59  while (true) {
60    E = E->IgnoreParens();
61    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
62      if (CE->getCastKind() == CK_DerivedToBase ||
63          CE->getCastKind() == CK_UncheckedDerivedToBase ||
64          CE->getCastKind() == CK_NoOp) {
65        E = CE->getSubExpr();
66        continue;
67      }
68    }
69
70    break;
71  }
72
73  QualType DerivedType = E->getType();
74  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
75    DerivedType = PTy->getPointeeType();
76
77  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
78}
79
80// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
81// quite what we want.
82static const Expr *skipNoOpCastsAndParens(const Expr *E) {
83  while (true) {
84    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
85      E = PE->getSubExpr();
86      continue;
87    }
88
89    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
90      if (CE->getCastKind() == CK_NoOp) {
91        E = CE->getSubExpr();
92        continue;
93      }
94    }
95    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
96      if (UO->getOpcode() == UO_Extension) {
97        E = UO->getSubExpr();
98        continue;
99      }
100    }
101    return E;
102  }
103}
104
105/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
106/// expr can be devirtualized.
107static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
108                                               const Expr *Base,
109                                               const CXXMethodDecl *MD) {
110
111  // When building with -fapple-kext, all calls must go through the vtable since
112  // the kernel linker can do runtime patching of vtables.
113  if (Context.getLangOptions().AppleKext)
114    return false;
115
116  // If the most derived class is marked final, we know that no subclass can
117  // override this member function and so we can devirtualize it. For example:
118  //
119  // struct A { virtual void f(); }
120  // struct B final : A { };
121  //
122  // void f(B *b) {
123  //   b->f();
124  // }
125  //
126  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
127  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
128    return true;
129
130  // If the member function is marked 'final', we know that it can't be
131  // overridden and can therefore devirtualize it.
132  if (MD->hasAttr<FinalAttr>())
133    return true;
134
135  // Similarly, if the class itself is marked 'final' it can't be overridden
136  // and we can therefore devirtualize the member function call.
137  if (MD->getParent()->hasAttr<FinalAttr>())
138    return true;
139
140  Base = skipNoOpCastsAndParens(Base);
141  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
142    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
143      // This is a record decl. We know the type and can devirtualize it.
144      return VD->getType()->isRecordType();
145    }
146
147    return false;
148  }
149
150  // We can always devirtualize calls on temporary object expressions.
151  if (isa<CXXConstructExpr>(Base))
152    return true;
153
154  // And calls on bound temporaries.
155  if (isa<CXXBindTemporaryExpr>(Base))
156    return true;
157
158  // Check if this is a call expr that returns a record type.
159  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
160    return CE->getCallReturnType()->isRecordType();
161
162  // We can't devirtualize the call.
163  return false;
164}
165
166// Note: This function also emit constructor calls to support a MSVC
167// extensions allowing explicit constructor function call.
168RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
169                                              ReturnValueSlot ReturnValue) {
170  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
171    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
172
173  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
174  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
175
176  CGDebugInfo *DI = getDebugInfo();
177  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
178      && !isa<CallExpr>(ME->getBase())) {
179    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
180    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
181      DI->getOrCreateRecordType(PTy->getPointeeType(),
182                                MD->getParent()->getLocation());
183    }
184  }
185
186  if (MD->isStatic()) {
187    // The method is static, emit it as we would a regular call.
188    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
189    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
190                    ReturnValue, CE->arg_begin(), CE->arg_end());
191  }
192
193  // Compute the object pointer.
194  llvm::Value *This;
195  if (ME->isArrow())
196    This = EmitScalarExpr(ME->getBase());
197  else
198    This = EmitLValue(ME->getBase()).getAddress();
199
200  if (MD->isTrivial()) {
201    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
202    if (isa<CXXConstructorDecl>(MD) &&
203        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
204      return RValue::get(0);
205
206    if (MD->isCopyAssignmentOperator()) {
207      // We don't like to generate the trivial copy assignment operator when
208      // it isn't necessary; just produce the proper effect here.
209      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
210      EmitAggregateCopy(This, RHS, CE->getType());
211      return RValue::get(This);
212    }
213
214    if (isa<CXXConstructorDecl>(MD) &&
215        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
216      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
217      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
218                                     CE->arg_begin(), CE->arg_end());
219      return RValue::get(This);
220    }
221    llvm_unreachable("unknown trivial member function");
222  }
223
224  // Compute the function type we're calling.
225  const CGFunctionInfo *FInfo = 0;
226  if (isa<CXXDestructorDecl>(MD))
227    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
228                                           Dtor_Complete);
229  else if (isa<CXXConstructorDecl>(MD))
230    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
231                                            Ctor_Complete);
232  else
233    FInfo = &CGM.getTypes().getFunctionInfo(MD);
234
235  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
236  const llvm::Type *Ty
237    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
238
239  // C++ [class.virtual]p12:
240  //   Explicit qualification with the scope operator (5.1) suppresses the
241  //   virtual call mechanism.
242  //
243  // We also don't emit a virtual call if the base expression has a record type
244  // because then we know what the type is.
245  bool UseVirtualCall;
246  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
247                   && !canDevirtualizeMemberFunctionCalls(getContext(),
248                                                          ME->getBase(), MD);
249  llvm::Value *Callee;
250  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
251    if (UseVirtualCall) {
252      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
253    } else {
254      if (getContext().getLangOptions().AppleKext &&
255          MD->isVirtual() &&
256          ME->hasQualifier())
257        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
258      else
259        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
260    }
261  } else if (const CXXConstructorDecl *Ctor =
262               dyn_cast<CXXConstructorDecl>(MD)) {
263    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
264  } else if (UseVirtualCall) {
265      Callee = BuildVirtualCall(MD, This, Ty);
266  } else {
267    if (getContext().getLangOptions().AppleKext &&
268        MD->isVirtual() &&
269        ME->hasQualifier())
270      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
271    else
272      Callee = CGM.GetAddrOfFunction(MD, Ty);
273  }
274
275  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
276                           CE->arg_begin(), CE->arg_end());
277}
278
279RValue
280CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
281                                              ReturnValueSlot ReturnValue) {
282  const BinaryOperator *BO =
283      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
284  const Expr *BaseExpr = BO->getLHS();
285  const Expr *MemFnExpr = BO->getRHS();
286
287  const MemberPointerType *MPT =
288    MemFnExpr->getType()->getAs<MemberPointerType>();
289
290  const FunctionProtoType *FPT =
291    MPT->getPointeeType()->getAs<FunctionProtoType>();
292  const CXXRecordDecl *RD =
293    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
294
295  // Get the member function pointer.
296  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
297
298  // Emit the 'this' pointer.
299  llvm::Value *This;
300
301  if (BO->getOpcode() == BO_PtrMemI)
302    This = EmitScalarExpr(BaseExpr);
303  else
304    This = EmitLValue(BaseExpr).getAddress();
305
306  // Ask the ABI to load the callee.  Note that This is modified.
307  llvm::Value *Callee =
308    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
309
310  CallArgList Args;
311
312  QualType ThisType =
313    getContext().getPointerType(getContext().getTagDeclType(RD));
314
315  // Push the this ptr.
316  Args.push_back(std::make_pair(RValue::get(This), ThisType));
317
318  // And the rest of the call args
319  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
320  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
321  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
322                  ReturnValue, Args);
323}
324
325RValue
326CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
327                                               const CXXMethodDecl *MD,
328                                               ReturnValueSlot ReturnValue) {
329  assert(MD->isInstance() &&
330         "Trying to emit a member call expr on a static method!");
331  LValue LV = EmitLValue(E->getArg(0));
332  llvm::Value *This = LV.getAddress();
333
334  if (MD->isCopyAssignmentOperator()) {
335    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
336    if (ClassDecl->hasTrivialCopyAssignment()) {
337      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
338             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
339      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
340      QualType Ty = E->getType();
341      EmitAggregateCopy(This, Src, Ty);
342      return RValue::get(This);
343    }
344  }
345
346  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
347  const llvm::Type *Ty =
348    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
349                                   FPT->isVariadic());
350  llvm::Value *Callee;
351  if (MD->isVirtual() &&
352      !canDevirtualizeMemberFunctionCalls(getContext(),
353                                           E->getArg(0), MD))
354    Callee = BuildVirtualCall(MD, This, Ty);
355  else
356    Callee = CGM.GetAddrOfFunction(MD, Ty);
357
358  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
359                           E->arg_begin() + 1, E->arg_end());
360}
361
362void
363CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
364                                      AggValueSlot Dest) {
365  assert(!Dest.isIgnored() && "Must have a destination!");
366  const CXXConstructorDecl *CD = E->getConstructor();
367
368  // If we require zero initialization before (or instead of) calling the
369  // constructor, as can be the case with a non-user-provided default
370  // constructor, emit the zero initialization now.
371  if (E->requiresZeroInitialization())
372    EmitNullInitialization(Dest.getAddr(), E->getType());
373
374  // If this is a call to a trivial default constructor, do nothing.
375  if (CD->isTrivial() && CD->isDefaultConstructor())
376    return;
377
378  // Elide the constructor if we're constructing from a temporary.
379  // The temporary check is required because Sema sets this on NRVO
380  // returns.
381  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
382    assert(getContext().hasSameUnqualifiedType(E->getType(),
383                                               E->getArg(0)->getType()));
384    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
385      EmitAggExpr(E->getArg(0), Dest);
386      return;
387    }
388  }
389
390  const ConstantArrayType *Array
391    = getContext().getAsConstantArrayType(E->getType());
392  if (Array) {
393    QualType BaseElementTy = getContext().getBaseElementType(Array);
394    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
395    BasePtr = llvm::PointerType::getUnqual(BasePtr);
396    llvm::Value *BaseAddrPtr =
397      Builder.CreateBitCast(Dest.getAddr(), BasePtr);
398
399    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
400                               E->arg_begin(), E->arg_end());
401  }
402  else {
403    CXXCtorType Type =
404      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
405      ? Ctor_Complete : Ctor_Base;
406    bool ForVirtualBase =
407      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
408
409    // Call the constructor.
410    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
411                           E->arg_begin(), E->arg_end());
412  }
413}
414
415void
416CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
417                                            llvm::Value *Src,
418                                            const Expr *Exp) {
419  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
420    Exp = E->getSubExpr();
421  assert(isa<CXXConstructExpr>(Exp) &&
422         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
423  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
424  const CXXConstructorDecl *CD = E->getConstructor();
425  RunCleanupsScope Scope(*this);
426
427  // If we require zero initialization before (or instead of) calling the
428  // constructor, as can be the case with a non-user-provided default
429  // constructor, emit the zero initialization now.
430  // FIXME. Do I still need this for a copy ctor synthesis?
431  if (E->requiresZeroInitialization())
432    EmitNullInitialization(Dest, E->getType());
433
434  assert(!getContext().getAsConstantArrayType(E->getType())
435         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
436  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
437                                 E->arg_begin(), E->arg_end());
438}
439
440/// Check whether the given operator new[] is the global placement
441/// operator new[].
442static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
443                                        const FunctionDecl *Fn) {
444  // Must be in global scope.  Note that allocation functions can't be
445  // declared in namespaces.
446  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
447    return false;
448
449  // Signature must be void *operator new[](size_t, void*).
450  // The size_t is common to all operator new[]s.
451  if (Fn->getNumParams() != 2)
452    return false;
453
454  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
455  return (ParamType == Ctx.VoidPtrTy);
456}
457
458static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
459                                        const CXXNewExpr *E) {
460  if (!E->isArray())
461    return CharUnits::Zero();
462
463  // No cookie is required if the new operator being used is
464  // ::operator new[](size_t, void*).
465  const FunctionDecl *OperatorNew = E->getOperatorNew();
466  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
467    return CharUnits::Zero();
468
469  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
470}
471
/// EmitCXXNewAllocSize - Compute the byte count to pass to operator new for
/// the given new-expression, guarding the arithmetic against unsigned
/// overflow.  On return, NumElements holds the total element count (the
/// runtime array bound times all constant inner dimensions) and
/// SizeWithoutCookie holds the allocation size excluding any array cookie.
/// On overflow the returned size is all-ones (-1), which the allocation
/// function is expected to reject.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  // Scalar new: the size is just the size of the allocated type.
  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Fold any constant inner array dimensions into a single multiplier,
  // leaving ElemType as the innermost element type.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // If the double-width product fits in SizeWidth bits, it didn't
    // overflow; otherwise clamp to -1 so operator new fails.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      // Clamp to -1 when the addition overflowed.
      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // Fold the addition's overflow bit into the multiplication's.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  // Without a cookie the two sizes coincide; with one, every path above
  // must already have set SizeWithoutCookie.
  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}
639
640static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
641                                    llvm::Value *NewPtr) {
642
643  assert(E->getNumConstructorArgs() == 1 &&
644         "Can only have one argument to initializer of POD type.");
645
646  const Expr *Init = E->getConstructorArg(0);
647  QualType AllocType = E->getAllocatedType();
648
649  unsigned Alignment =
650    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
651  if (!CGF.hasAggregateLLVMType(AllocType))
652    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
653                          AllocType.isVolatileQualified(), Alignment,
654                          AllocType);
655  else if (AllocType->isAnyComplexType())
656    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
657                                AllocType.isVolatileQualified());
658  else {
659    AggValueSlot Slot
660      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
661    CGF.EmitAggExpr(Init, Slot);
662  }
663}
664
665void
666CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
667                                         llvm::Value *NewPtr,
668                                         llvm::Value *NumElements) {
669  // We have a POD type.
670  if (E->getNumConstructorArgs() == 0)
671    return;
672
673  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
674
675  // Create a temporary for the loop index and initialize it with 0.
676  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
677  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
678  Builder.CreateStore(Zero, IndexPtr);
679
680  // Start the loop with a block that tests the condition.
681  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
682  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
683
684  EmitBlock(CondBlock);
685
686  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
687
688  // Generate: if (loop-index < number-of-elements fall to the loop body,
689  // otherwise, go to the block after the for-loop.
690  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
691  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
692  // If the condition is true, execute the body.
693  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
694
695  EmitBlock(ForBody);
696
697  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
698  // Inside the loop body, emit the constructor call on the array element.
699  Counter = Builder.CreateLoad(IndexPtr);
700  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
701                                                   "arrayidx");
702  StoreAnyExprIntoOneUnit(*this, E, Address);
703
704  EmitBlock(ContinueBlock);
705
706  // Emit the increment of the loop counter.
707  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
708  Counter = Builder.CreateLoad(IndexPtr);
709  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
710  Builder.CreateStore(NextVal, IndexPtr);
711
712  // Finally, branch back up to the condition for the next iteration.
713  EmitBranch(CondBlock);
714
715  // Emit the fall-through block.
716  EmitBlock(AfterFor, true);
717}
718
719static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
720                           llvm::Value *NewPtr, llvm::Value *Size) {
721  CGF.EmitCastToVoidPtr(NewPtr);
722  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
723  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
724                           Alignment.getQuantity(), false);
725}
726
727static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
728                               llvm::Value *NewPtr,
729                               llvm::Value *NumElements,
730                               llvm::Value *AllocSizeWithoutCookie) {
731  if (E->isArray()) {
732    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
733      bool RequiresZeroInitialization = false;
734      if (Ctor->getParent()->hasTrivialConstructor()) {
735        // If new expression did not specify value-initialization, then there
736        // is no initialization.
737        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
738          return;
739
740        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
741          // Optimization: since zero initialization will just set the memory
742          // to all zeroes, generate a single memset to do it in one shot.
743          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
744                         AllocSizeWithoutCookie);
745          return;
746        }
747
748        RequiresZeroInitialization = true;
749      }
750
751      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
752                                     E->constructor_arg_begin(),
753                                     E->constructor_arg_end(),
754                                     RequiresZeroInitialization);
755      return;
756    } else if (E->getNumConstructorArgs() == 1 &&
757               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
758      // Optimization: since zero initialization will just set the memory
759      // to all zeroes, generate a single memset to do it in one shot.
760      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
761                     AllocSizeWithoutCookie);
762      return;
763    } else {
764      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
765      return;
766    }
767  }
768
769  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
770    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
771    // direct initialization. C++ [dcl.init]p5 requires that we
772    // zero-initialize storage if there are no user-declared constructors.
773    if (E->hasInitializer() &&
774        !Ctor->getParent()->hasUserDeclaredConstructor() &&
775        !Ctor->getParent()->isEmpty())
776      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
777
778    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
779                               NewPtr, E->constructor_arg_begin(),
780                               E->constructor_arg_end());
781
782    return;
783  }
784  // We have a POD type.
785  if (E->getNumConstructorArgs() == 0)
786    return;
787
788  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
789}
790
791namespace {
792  /// A cleanup to call the given 'operator delete' function upon
793  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;           // number of placement args stored after *this
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;                  // the allocation to free
    llvm::Value *AllocSize;            // the size originally passed to operator new

    // The placement arguments are stored in trailing storage immediately
    // after this object; the EHScopeStack reserves getExtraSize() extra
    // bytes when the cleanup is pushed via pushCleanupWithExtra.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Reports how many extra bytes of trailing storage this cleanup
    /// needs for \p NumPlacementArgs saved placement arguments.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Stores the I'th placement argument into the trailing storage.
    /// Must be called for each placement argument after the cleanup is
    /// pushed, before it can possibly fire.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Either a placement delete matching the placement new exactly, or
      // the usual (void*, size_t) member operator delete.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
845
846  /// A cleanup to call the given 'operator delete' function upon
847  /// abnormal exit from a new expression when the new expression is
848  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;           // number of saved placement args after *this
    const FunctionDecl *OperatorDelete;
    // Because the new-expression is conditionally evaluated, the values
    // needed by the cleanup may not dominate the cleanup's emission point;
    // they are therefore kept as DominatingValue saved_types and restored
    // when (and where) the cleanup actually fires.
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    // Saved placement arguments live in trailing storage immediately after
    // this object, reserved via getExtraSize()/pushCleanupWithExtra.
    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    /// Reports how many extra bytes of trailing storage this cleanup
    /// needs for \p NumPlacementArgs saved placement arguments.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Stores the I'th saved placement argument into the trailing storage.
    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Either a placement delete matching the placement new exactly, or
      // the usual (void*, size_t) member operator delete.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
906}
907
908/// Enter a cleanup to call 'operator delete' if the initializer in a
909/// new-expression throws.
910static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
911                                  const CXXNewExpr *E,
912                                  llvm::Value *NewPtr,
913                                  llvm::Value *AllocSize,
914                                  const CallArgList &NewArgs) {
915  // If we're not inside a conditional branch, then the cleanup will
916  // dominate and we can do the easier (and more efficient) thing.
917  if (!CGF.isInConditionalBranch()) {
918    CallDeleteDuringNew *Cleanup = CGF.EHStack
919      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
920                                                 E->getNumPlacementArgs(),
921                                                 E->getOperatorDelete(),
922                                                 NewPtr, AllocSize);
923    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
924      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
925
926    return;
927  }
928
929  // Otherwise, we need to save all this stuff.
930  DominatingValue<RValue>::saved_type SavedNewPtr =
931    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
932  DominatingValue<RValue>::saved_type SavedAllocSize =
933    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
934
935  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
936    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
937                                                 E->getNumPlacementArgs(),
938                                                 E->getOperatorDelete(),
939                                                 SavedNewPtr,
940                                                 SavedAllocSize);
941  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
942    Cleanup->setPlacementArg(I,
943                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
944
945  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
946}
947
/// Emit a C++ new-expression: call the allocation function, optionally
/// null-check the result, write the array cookie, run the initializer
/// (guarded by an 'operator delete' EH cleanup), and return a pointer of
/// the expression's type.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // For array news, numElements is the dynamic element count and
  // allocSizeWithoutCookie excludes any array-cookie padding.
  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(getContext(), *this, E, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.push_back(std::make_pair(RValue::get(allocSize), sizeType));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
             CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
             allocatorArgs, allocator);

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If a cookie was added to the allocation size, let the ABI write it
  // and advance 'allocation' past it to the first element.
  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
  }

  const llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  } else {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  // Merge the not-null and null paths; the result is null exactly when
  // the allocator returned null.
  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}
1092
1093void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1094                                     llvm::Value *Ptr,
1095                                     QualType DeleteTy) {
1096  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1097
1098  const FunctionProtoType *DeleteFTy =
1099    DeleteFD->getType()->getAs<FunctionProtoType>();
1100
1101  CallArgList DeleteArgs;
1102
1103  // Check if we need to pass the size to the delete operator.
1104  llvm::Value *Size = 0;
1105  QualType SizeTy;
1106  if (DeleteFTy->getNumArgs() == 2) {
1107    SizeTy = DeleteFTy->getArgType(1);
1108    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1109    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1110                                  DeleteTypeSize.getQuantity());
1111  }
1112
1113  QualType ArgTy = DeleteFTy->getArgType(0);
1114  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1115  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1116
1117  if (Size)
1118    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1119
1120  // Emit the call to delete.
1121  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1122           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1123           DeleteArgs, DeleteFD);
1124}
1125
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                   // the object being deleted
    const FunctionDecl *OperatorDelete; // the delete function to call
    QualType ElementType;               // static type, used to compute the size

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1143
/// Emit the code for deleting a single object: run the destructor (if any)
/// and then call \p OperatorDelete, even if the destructor throws.  When the
/// destructor is virtual, the whole job is delegated to a virtual call of
/// the deleting destructor instead.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        // Dispatch to the Dtor_Deleting variant, which both destroys the
        // object and frees its storage.
        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Popping the cleanup emits the delete call on the normal path too.
  CGF.PopCleanupBlock();
}
1186
1187namespace {
1188  /// Calls the given 'operator delete' on an array of objects.
1189  struct CallArrayDelete : EHScopeStack::Cleanup {
1190    llvm::Value *Ptr;
1191    const FunctionDecl *OperatorDelete;
1192    llvm::Value *NumElements;
1193    QualType ElementType;
1194    CharUnits CookieSize;
1195
1196    CallArrayDelete(llvm::Value *Ptr,
1197                    const FunctionDecl *OperatorDelete,
1198                    llvm::Value *NumElements,
1199                    QualType ElementType,
1200                    CharUnits CookieSize)
1201      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1202        ElementType(ElementType), CookieSize(CookieSize) {}
1203
1204    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1205      const FunctionProtoType *DeleteFTy =
1206        OperatorDelete->getType()->getAs<FunctionProtoType>();
1207      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1208
1209      CallArgList Args;
1210
1211      // Pass the pointer as the first argument.
1212      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1213      llvm::Value *DeletePtr
1214        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1215      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1216
1217      // Pass the original requested size as the second argument.
1218      if (DeleteFTy->getNumArgs() == 2) {
1219        QualType size_t = DeleteFTy->getArgType(1);
1220        const llvm::IntegerType *SizeTy
1221          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1222
1223        CharUnits ElementTypeSize =
1224          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1225
1226        // The size of an element, multiplied by the number of elements.
1227        llvm::Value *Size
1228          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1229        Size = CGF.Builder.CreateMul(Size, NumElements);
1230
1231        // Plus the size of the cookie if applicable.
1232        if (!CookieSize.isZero()) {
1233          llvm::Value *CookieSizeV
1234            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1235          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1236        }
1237
1238        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1239      }
1240
1241      // Emit the call to delete.
1242      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1243                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1244                   ReturnValueSlot(), Args, OperatorDelete);
1245    }
1246  };
1247}
1248
1249/// Emit the code for deleting an array of objects.
1250static void EmitArrayDelete(CodeGenFunction &CGF,
1251                            const CXXDeleteExpr *E,
1252                            llvm::Value *Ptr,
1253                            QualType ElementType) {
1254  llvm::Value *NumElements = 0;
1255  llvm::Value *AllocatedPtr = 0;
1256  CharUnits CookieSize;
1257  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1258                                      NumElements, AllocatedPtr, CookieSize);
1259
1260  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1261
1262  // Make sure that we call delete even if one of the dtors throws.
1263  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1264  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1265                                           AllocatedPtr, OperatorDelete,
1266                                           NumElements, ElementType,
1267                                           CookieSize);
1268
1269  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1270    if (!RD->hasTrivialDestructor()) {
1271      assert(NumElements && "ReadArrayCookie didn't find element count"
1272                            " for a class with destructor");
1273      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1274    }
1275  }
1276
1277  CGF.PopCleanupBlock();
1278}
1279
/// Emit a C++ delete-expression: null-check the operand, then dispatch to
/// the array or single-object deletion path.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer is a no-op, so skip
  // straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // After unpeeling, Ptr must point directly at an element of DeleteTy.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1338
/// Emit a typeid expression, yielding a pointer to the std::type_info
/// object.  For a type operand or a non-polymorphic expression this is the
/// static RTTI descriptor; for a polymorphic glvalue it is loaded from the
/// object's vtable at runtime (the slot just before the address point, per
/// the Itanium C++ ABI), with a null check that calls __cxa_bad_typeid.
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(T): the answer is known statically.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      // Load the type_info pointer stored one slot before the vtable's
      // address point.
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic expression: the static type's descriptor suffices.
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1390
/// Emit a dynamic_cast of \p V.  Casts to void* are done inline via the
/// offset-to-top entry of the vtable; all other casts call the
/// __dynamic_cast runtime function.  Pointer casts may yield null on
/// failure; reference casts throw via __cxa_bad_cast instead.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  bool CanBeZero = false;   // may the operand (and hence result) be null?
  bool ToVoid = false;      // dynamic_cast<void*>?
  bool ThrowOnBad = false;  // reference cast: failure throws
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  // Strip down to the unqualified record types for the RTTI lookup.
  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  // For a nullable operand, branch around the cast when it is null.
  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*)
  if (ToVoid) {
    // Adjust by the offset-to-top value stored in the vtable (two slots
    // before the address point) to reach the most-derived object.
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = EmitCastToVoidPtr(This);
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast
    const llvm::Type *ResultType = Int8PtrTy;
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    ArgTys.push_back(Int8PtrTy);  // object pointer
    ArgTys.push_back(Int8PtrTy);  // source type_info
    ArgTys.push_back(Int8PtrTy);  // destination type_info
    ArgTys.push_back(PtrDiffTy);  // static offset hint
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    // Reference casts can't return null: call __cxa_bad_cast on failure.
    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(getLLVMContext());
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  // Merge the null and non-null paths with a PHI selecting null for the
  // null operand.
  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy, 2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1508