CGExprCXX.cpp revision 4bdbc0cb1e29ac870b7e7985cf4a1b5c34176e8c
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
/// EmitCXXMemberCall - Emit a call to the instance method MD through the
/// function pointer Callee on the object This.  If VTT is non-null it is
/// passed as a hidden argument immediately after 'this' (used by
/// constructor/destructor variants of classes with virtual bases).
/// ArgBeg/ArgEnd are the source-level arguments, excluding 'this'.
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    // The VTT is passed with type 'void**' (pointer to void-pointer).
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}
55
56static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
57  const Expr *E = Base;
58
59  while (true) {
60    E = E->IgnoreParens();
61    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
62      if (CE->getCastKind() == CK_DerivedToBase ||
63          CE->getCastKind() == CK_UncheckedDerivedToBase ||
64          CE->getCastKind() == CK_NoOp) {
65        E = CE->getSubExpr();
66        continue;
67      }
68    }
69
70    break;
71  }
72
73  QualType DerivedType = E->getType();
74  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
75    DerivedType = PTy->getPointeeType();
76
77  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
78}
79
80// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
81// quite what we want.
82static const Expr *skipNoOpCastsAndParens(const Expr *E) {
83  while (true) {
84    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
85      E = PE->getSubExpr();
86      continue;
87    }
88
89    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
90      if (CE->getCastKind() == CK_NoOp) {
91        E = CE->getSubExpr();
92        continue;
93      }
94    }
95    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
96      if (UO->getOpcode() == UO_Extension) {
97        E = UO->getSubExpr();
98        continue;
99      }
100    }
101    return E;
102  }
103}
104
105/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
106/// expr can be devirtualized.
107static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
108                                               const Expr *Base,
109                                               const CXXMethodDecl *MD) {
110
111  // When building with -fapple-kext, all calls must go through the vtable since
112  // the kernel linker can do runtime patching of vtables.
113  if (Context.getLangOptions().AppleKext)
114    return false;
115
116  // If the most derived class is marked final, we know that no subclass can
117  // override this member function and so we can devirtualize it. For example:
118  //
119  // struct A { virtual void f(); }
120  // struct B final : A { };
121  //
122  // void f(B *b) {
123  //   b->f();
124  // }
125  //
126  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
127  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
128    return true;
129
130  // If the member function is marked 'final', we know that it can't be
131  // overridden and can therefore devirtualize it.
132  if (MD->hasAttr<FinalAttr>())
133    return true;
134
135  // Similarly, if the class itself is marked 'final' it can't be overridden
136  // and we can therefore devirtualize the member function call.
137  if (MD->getParent()->hasAttr<FinalAttr>())
138    return true;
139
140  Base = skipNoOpCastsAndParens(Base);
141  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
142    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
143      // This is a record decl. We know the type and can devirtualize it.
144      return VD->getType()->isRecordType();
145    }
146
147    return false;
148  }
149
150  // We can always devirtualize calls on temporary object expressions.
151  if (isa<CXXConstructExpr>(Base))
152    return true;
153
154  // And calls on bound temporaries.
155  if (isa<CXXBindTemporaryExpr>(Base))
156    return true;
157
158  // Check if this is a call expr that returns a record type.
159  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
160    return CE->getCallReturnType()->isRecordType();
161
162  // We can't devirtualize the call.
163  return false;
164}
165
// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
/// EmitCXXMemberCallExpr - Emit a C++ member call expression: either a
/// direct member call (obj.f(...), p->f(...)), a member-pointer call
/// (dispatched to EmitCXXMemberPointerCallExpr), or an explicit
/// constructor/destructor call.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  // A member-pointer call (obj.*p)(...) parses as a BinaryOperator callee.
  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  // Under -flimit-debug-info, force emission of the callee's class type,
  // since calling a member implies the full type is needed.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  // Trivial members need no real call; emit their effect (if any) inline.
  if (MD->isTrivial()) {
    // Trivial destructors and default constructors do nothing.
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    // Explicit trivial copy-constructor call (MSVC extension): emit the
    // copy via the synthesized copy-constructor helper.
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.  Constructors and destructors
  // are always called in their complete-object form here.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                           Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                   && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                          ME->getBase(), MD);
  // Pick the callee: a vtable load for virtual dispatch, the complete-object
  // ctor/dtor symbol, or the method's direct address.
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      // Under -fapple-kext, even qualified virtual calls use an indirect
      // kext-specific vtable lookup.
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
      Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    // Same -fapple-kext special case as for destructors above.
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
280
/// EmitCXXMemberPointerCallExpr - Emit a call through a pointer to member
/// function: (obj .* memfn)(args) or (ptr ->* memfn)(args).
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  // The callee is a BinaryOperator: LHS is the object expression, RHS is
  // the member-pointer expression.
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  // ->* has a pointer on the left; .* has an lvalue object.
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}
326
/// EmitCXXOperatorMemberCallExpr - Emit a call to an overloaded operator
/// declared as a member function MD.  Arg 0 of E is the object; the
/// remaining args are the operator's operands.
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      // Trivial copy assignment reduces to an aggregate copy; skip the call.
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  // Virtual operators dispatch through the vtable unless the call can be
  // devirtualized based on the object expression.
  llvm::Value *Callee;
  if (MD->isVirtual() &&
      !canDevirtualizeMemberFunctionCalls(getContext(),
                                           E->getArg(0), MD))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  // Skip arg 0 (the object); the rest become the call's arguments.
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}
363
/// EmitCXXConstructExpr - Emit a constructor invocation that initializes
/// the object(s) in Dest, handling zero-initialization, trivial default
/// constructors, copy elision, and array construction.
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      // Evaluate the source temporary directly into the destination.
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    // Array of objects: construct each element in a loop over the base
    // element type.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    // Single object: select the ctor variant from the construction kind.
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}
416
/// EmitSynthesizedCXXCopyCtor - Emit a synthesized copy-constructor call
/// copying Src into Dest.  Exp must be (possibly wrapped in cleanups) a
/// CXXConstructExpr for the copy constructor; arrays are not supported.
void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  // Look through any wrapping cleanups expression.
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  // Run any temporaries' cleanups before returning.
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}
441
442/// Check whether the given operator new[] is the global placement
443/// operator new[].
444static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
445                                        const FunctionDecl *Fn) {
446  // Must be in global scope.  Note that allocation functions can't be
447  // declared in namespaces.
448  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
449    return false;
450
451  // Signature must be void *operator new[](size_t, void*).
452  // The size_t is common to all operator new[]s.
453  if (Fn->getNumParams() != 2)
454    return false;
455
456  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
457  return (ParamType == Ctx.VoidPtrTy);
458}
459
460static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
461                                        const CXXNewExpr *E) {
462  if (!E->isArray())
463    return CharUnits::Zero();
464
465  // No cookie is required if the new operator being used is
466  // ::operator new[](size_t, void*).
467  const FunctionDecl *OperatorNew = E->getOperatorNew();
468  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
469    return CharUnits::Zero();
470
471  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
472}
473
/// EmitCXXNewAllocSize - Compute the byte count to pass to operator new for
/// the new-expression E.  On return, NumElements holds the total element
/// count (arrays only) and SizeWithoutCookie holds the byte size excluding
/// any array cookie.  Array sizes are computed with overflow checks; on
/// overflow the size becomes -1 (all-ones) so operator new throws.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  // Non-array new: the size is just the static size of the type.
  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Collapse nested constant array dimensions into a single multiplier.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // If the double-width product fits in size_t, use it; otherwise fold
    // the overflow to -1 at compile time.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      // size + cookie, via llvm.uadd.with.overflow; select -1 on overflow.
      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // Overflow in either the multiply or the add poisons the result.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  // With no cookie the two sizes coincide; otherwise every branch above
  // must already have set SizeWithoutCookie.
  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}
641
/// StoreAnyExprIntoOneUnit - Store the single initializer expression of a
/// new-expression into the freshly allocated object at NewPtr, dispatching
/// on scalar / complex / aggregate evaluation kind.
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    // Scalar: evaluate and store directly.
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    // Aggregate: evaluate straight into the allocated storage.
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}
666
/// EmitNewArrayInitializer - Initialize the NumElements elements of a
/// non-constructor array new by emitting an explicit loop that stores the
/// single initializer into each element in turn.
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
720
721static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
722                           llvm::Value *NewPtr, llvm::Value *Size) {
723  CGF.EmitCastToVoidPtr(NewPtr);
724  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
725  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
726                           Alignment.getQuantity(), false);
727}
728
/// EmitNewInitializer - Emit the initialization of the storage allocated by
/// a new-expression: constructor calls (scalar or array), zero-fill
/// optimizations, or direct stores for POD initializers.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        // Trivial ctor but not memset-able: let the aggregate-ctor loop
        // zero-initialize each element.
        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // General case: store the initializer into each element in a loop.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  // Single POD initializer: store it directly.
  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
792
namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  ///
  /// The placement arguments are kept in variable-length trailing
  /// storage allocated directly after this object by
  /// pushCleanupWithExtra (see getExtraSize / getPlacementArgs).
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;        // the allocation to be freed
    llvm::Value *AllocSize;  // the size that was passed to 'operator new'

    // The placement arguments live immediately after this object.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Number of extra trailing bytes the EHScopeStack must allocate
    /// to hold the placement arguments.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Record the I'th placement argument in the trailing storage.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    /// Emit the call to 'operator delete': the pointer, the implicit
    /// size argument (if the operator takes one), then the placement
    /// arguments in order.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  ///
  /// Mirrors CallDeleteDuringNew, except that every value is held as a
  /// DominatingValue<RValue>::saved_type because the cleanup may run on
  /// paths that the original values do not dominate; each value is
  /// restore()d when the cleanup is emitted.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    // The saved placement arguments live immediately after this object.
    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    /// Number of extra trailing bytes needed for the saved placement args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Record the I'th saved placement argument in the trailing storage.
    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    /// Restore every saved value and emit the call to 'operator delete'.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}
909
910/// Enter a cleanup to call 'operator delete' if the initializer in a
911/// new-expression throws.
912static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
913                                  const CXXNewExpr *E,
914                                  llvm::Value *NewPtr,
915                                  llvm::Value *AllocSize,
916                                  const CallArgList &NewArgs) {
917  // If we're not inside a conditional branch, then the cleanup will
918  // dominate and we can do the easier (and more efficient) thing.
919  if (!CGF.isInConditionalBranch()) {
920    CallDeleteDuringNew *Cleanup = CGF.EHStack
921      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
922                                                 E->getNumPlacementArgs(),
923                                                 E->getOperatorDelete(),
924                                                 NewPtr, AllocSize);
925    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
926      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
927
928    return;
929  }
930
931  // Otherwise, we need to save all this stuff.
932  DominatingValue<RValue>::saved_type SavedNewPtr =
933    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
934  DominatingValue<RValue>::saved_type SavedAllocSize =
935    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
936
937  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
938    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
939                                                 E->getNumPlacementArgs(),
940                                                 E->getOperatorDelete(),
941                                                 SavedNewPtr,
942                                                 SavedAllocSize);
943  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
944    Cleanup->setPlacementArg(I,
945                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
946
947  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
948}
949
/// Emit a new-expression: call the allocation function, optionally
/// null-check the result, write any array cookie, run the initializer
/// (guarded by an EH cleanup that calls 'operator delete' if it
/// throws), and produce a pointer of the expression's type.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // For array news, numElements is the emitted element count and
  // allocSize may include extra space for an array cookie; otherwise
  // allocSize == allocSizeWithoutCookie.
  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(getContext(), *this, E, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.push_back(std::make_pair(RValue::get(allocSize), sizeType));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
             CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
             allocatorArgs, allocator);

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    // Branch around the initialization entirely if the pointer is null;
    // the two paths merge at new.cont below.
    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If the allocation size included cookie padding, let the C++ ABI
  // write the cookie and advance 'allocation' past it.
  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
  }

  const llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  } else {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  if (nullCheck) {
    conditional.end(*this);

    // Merge the null and not-null paths, producing null for the former.
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}
1094
1095void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1096                                     llvm::Value *Ptr,
1097                                     QualType DeleteTy) {
1098  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1099
1100  const FunctionProtoType *DeleteFTy =
1101    DeleteFD->getType()->getAs<FunctionProtoType>();
1102
1103  CallArgList DeleteArgs;
1104
1105  // Check if we need to pass the size to the delete operator.
1106  llvm::Value *Size = 0;
1107  QualType SizeTy;
1108  if (DeleteFTy->getNumArgs() == 2) {
1109    SizeTy = DeleteFTy->getArgType(1);
1110    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1111    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1112                                  DeleteTypeSize.getQuantity());
1113  }
1114
1115  QualType ArgTy = DeleteFTy->getArgType(0);
1116  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1117  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1118
1119  if (Size)
1120    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1121
1122  // Emit the call to delete.
1123  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1124           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1125           DeleteArgs, DeleteFD);
1126}
1127
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                    // the object being deleted
    const FunctionDecl *OperatorDelete;  // deallocation function to call
    QualType ElementType;                // static type of the object

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    // Emit the deallocation when the cleanup fires.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1145
/// Emit the code for deleting a single object.
///
/// If the object has a virtual destructor, a single virtual call to
/// the deleting destructor (which both destroys and deallocates) is
/// emitted and we return.  Otherwise the complete destructor (if any)
/// is called and then 'operator delete', with a cleanup guaranteeing
/// the delete runs even if the destructor throws.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        // Build the function type for the destructor and then a
        // virtual call to the deleting-destructor entry point.
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  CGF.PopCleanupBlock();
}
1188
1189namespace {
1190  /// Calls the given 'operator delete' on an array of objects.
1191  struct CallArrayDelete : EHScopeStack::Cleanup {
1192    llvm::Value *Ptr;
1193    const FunctionDecl *OperatorDelete;
1194    llvm::Value *NumElements;
1195    QualType ElementType;
1196    CharUnits CookieSize;
1197
1198    CallArrayDelete(llvm::Value *Ptr,
1199                    const FunctionDecl *OperatorDelete,
1200                    llvm::Value *NumElements,
1201                    QualType ElementType,
1202                    CharUnits CookieSize)
1203      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1204        ElementType(ElementType), CookieSize(CookieSize) {}
1205
1206    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1207      const FunctionProtoType *DeleteFTy =
1208        OperatorDelete->getType()->getAs<FunctionProtoType>();
1209      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1210
1211      CallArgList Args;
1212
1213      // Pass the pointer as the first argument.
1214      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1215      llvm::Value *DeletePtr
1216        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1217      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1218
1219      // Pass the original requested size as the second argument.
1220      if (DeleteFTy->getNumArgs() == 2) {
1221        QualType size_t = DeleteFTy->getArgType(1);
1222        const llvm::IntegerType *SizeTy
1223          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1224
1225        CharUnits ElementTypeSize =
1226          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1227
1228        // The size of an element, multiplied by the number of elements.
1229        llvm::Value *Size
1230          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1231        Size = CGF.Builder.CreateMul(Size, NumElements);
1232
1233        // Plus the size of the cookie if applicable.
1234        if (!CookieSize.isZero()) {
1235          llvm::Value *CookieSizeV
1236            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1237          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1238        }
1239
1240        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1241      }
1242
1243      // Emit the call to delete.
1244      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1245                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1246                   ReturnValueSlot(), Args, OperatorDelete);
1247    }
1248  };
1249}
1250
1251/// Emit the code for deleting an array of objects.
1252static void EmitArrayDelete(CodeGenFunction &CGF,
1253                            const CXXDeleteExpr *E,
1254                            llvm::Value *Ptr,
1255                            QualType ElementType) {
1256  llvm::Value *NumElements = 0;
1257  llvm::Value *AllocatedPtr = 0;
1258  CharUnits CookieSize;
1259  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1260                                      NumElements, AllocatedPtr, CookieSize);
1261
1262  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1263
1264  // Make sure that we call delete even if one of the dtors throws.
1265  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1266  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1267                                           AllocatedPtr, OperatorDelete,
1268                                           NumElements, ElementType,
1269                                           CookieSize);
1270
1271  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1272    if (!RD->hasTrivialDestructor()) {
1273      assert(NumElements && "ReadArrayCookie didn't find element count"
1274                            " for a class with destructor");
1275      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1276    }
1277  }
1278
1279  CGF.PopCleanupBlock();
1280}
1281
/// Emit a delete-expression: null-check the operand, destroy the
/// object or array, and call the deallocation function.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer skips straight to
  // the end block.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // After the unpeeling above, Ptr must point at exactly DeleteTy.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1340
1341static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1342  // void __cxa_bad_typeid();
1343
1344  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1345  const llvm::FunctionType *FTy =
1346  llvm::FunctionType::get(VoidTy, false);
1347
1348  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1349}
1350
1351static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1352  llvm::Value *F = getBadTypeidFn(CGF);
1353  if (llvm::BasicBlock *InvokeDest = CGF.getInvokeDest()) {
1354    llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
1355    CGF.Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
1356    CGF.EmitBlock(Cont);
1357  } else
1358    CGF.Builder.CreateCall(F)->setDoesNotReturn();
1359
1360  CGF.Builder.CreateUnreachable();
1361}
1362
/// Emit a typeid expression, producing a pointer to the type_info
/// object (bitcast to the expression's result type).
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(type) is a constant: just return the RTTI descriptor.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  // For a polymorphic class the result is determined at run time, so
  // it must be read out of the object's vtable.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        // typeid(*p) on a null pointer must call __cxa_bad_typeid
        // instead of dereferencing the vtable.
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);

        EmitBadTypeidCall(*this);

        EmitBlock(NonZeroBlock);
      }
      // Load the type_info pointer stored at slot -1 of the vtable.
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic operand: the result is the static type's descriptor.
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1410
1411static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1412  // void *__dynamic_cast(const void *sub,
1413  //                      const abi::__class_type_info *src,
1414  //                      const abi::__class_type_info *dst,
1415  //                      std::ptrdiff_t src2dst_offset);
1416
1417  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
1418  const llvm::Type *PtrDiffTy =
1419    CGF.ConvertType(CGF.getContext().getPointerDiffType());
1420
1421  const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1422
1423  const llvm::FunctionType *FTy =
1424    llvm::FunctionType::get(Int8PtrTy, Args, false);
1425
1426  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
1427}
1428
1429static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1430  // void __cxa_bad_cast();
1431
1432  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1433  const llvm::FunctionType *FTy =
1434    llvm::FunctionType::get(VoidTy, false);
1435
1436  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1437}
1438
1439static void EmitBadCastCall(CodeGenFunction &CGF) {
1440  llvm::Value *F = getBadCastFn(CGF);
1441  if (llvm::BasicBlock *InvokeDest = CGF.getInvokeDest()) {
1442    llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
1443    CGF.Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
1444    CGF.EmitBlock(Cont);
1445  } else
1446    CGF.Builder.CreateCall(F)->setDoesNotReturn();
1447
1448  CGF.Builder.CreateUnreachable();
1449}
1450
/// Emit the runtime portion of a dynamic_cast on a non-null value:
/// either the cast-to-void* vtable lookup, or a call to
/// __dynamic_cast (plus a bad_cast check for reference casts).
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  const llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable (stored at slot -2).
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  // Strip pointer/reference sugar to find the record types involved.
  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    // __dynamic_cast signals failure by returning null.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1524
1525static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1526                                          QualType DestTy) {
1527  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1528  if (DestTy->isPointerType())
1529    return llvm::Constant::getNullValue(DestLTy);
1530
1531  /// C++ [expr.dynamic.cast]p9:
1532  ///   A failed cast to reference type throws std::bad_cast
1533  EmitBadCastCall(CGF);
1534
1535  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1536  return llvm::UndefValue::get(DestLTy);
1537}
1538
1539llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1540                                              const CXXDynamicCastExpr *DCE) {
1541  QualType DestTy = DCE->getTypeAsWritten();
1542
1543  if (DCE->isAlwaysNull())
1544    return EmitDynamicCastToNull(*this, DestTy);
1545
1546  QualType SrcTy = DCE->getSubExpr()->getType();
1547
1548  // C++ [expr.dynamic.cast]p4:
1549  //   If the value of v is a null pointer value in the pointer case, the result
1550  //   is the null pointer value of type T.
1551  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1552
1553  llvm::BasicBlock *CastNull = 0;
1554  llvm::BasicBlock *CastNotNull = 0;
1555  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1556
1557  if (ShouldNullCheckSrcValue) {
1558    CastNull = createBasicBlock("dynamic_cast.null");
1559    CastNotNull = createBasicBlock("dynamic_cast.notnull");
1560
1561    llvm::Value *IsNull = Builder.CreateIsNull(Value);
1562    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1563    EmitBlock(CastNotNull);
1564  }
1565
1566  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1567
1568  if (ShouldNullCheckSrcValue) {
1569    EmitBranch(CastEnd);
1570
1571    EmitBlock(CastNull);
1572    EmitBranch(CastEnd);
1573  }
1574
1575  EmitBlock(CastEnd);
1576
1577  if (ShouldNullCheckSrcValue) {
1578    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1579    PHI->addIncoming(Value, CastNotNull);
1580    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1581
1582    Value = PHI;
1583  }
1584
1585  return Value;
1586}
1587