CGExprCXX.cpp revision 1679f5a84ae1e578b0de347c89eaf31e0465f33c
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
23RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
24                                          llvm::Value *Callee,
25                                          ReturnValueSlot ReturnValue,
26                                          llvm::Value *This,
27                                          llvm::Value *VTT,
28                                          CallExpr::const_arg_iterator ArgBeg,
29                                          CallExpr::const_arg_iterator ArgEnd) {
30  assert(MD->isInstance() &&
31         "Trying to emit a member call expr on a static method!");
32
33  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
34
35  CallArgList Args;
36
37  // Push the this ptr.
38  Args.push_back(std::make_pair(RValue::get(This),
39                                MD->getThisType(getContext())));
40
41  // If there is a VTT parameter, emit it.
42  if (VTT) {
43    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44    Args.push_back(std::make_pair(RValue::get(VTT), T));
45  }
46
47  // And the rest of the call args
48  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
49
50  QualType ResultType = FPT->getResultType();
51  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
52                                                 FPT->getExtInfo()),
53                  Callee, ReturnValue, Args, MD);
54}
55
56static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
57  QualType DerivedType = Base->IgnoreParenCasts()->getType();
58  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
59    DerivedType = PTy->getPointeeType();
60
61  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
62}
63
64/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
65/// expr can be devirtualized.
66static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
67                                               const Expr *Base,
68                                               const CXXMethodDecl *MD) {
69
70  // When building with -fapple-kext, all calls must go through the vtable since
71  // the kernel linker can do runtime patching of vtables.
72  if (Context.getLangOptions().AppleKext)
73    return false;
74
75  // If the most derived class is marked final, we know that no subclass can
76  // override this member function and so we can devirtualize it. For example:
77  //
78  // struct A { virtual void f(); }
79  // struct B final : A { };
80  //
81  // void f(B *b) {
82  //   b->f();
83  // }
84  //
85  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
86  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
87    return true;
88
89  // If the member function is marked 'final', we know that it can't be
90  // overridden and can therefore devirtualize it.
91  if (MD->hasAttr<FinalAttr>())
92    return true;
93
94  // Similarly, if the class itself is marked 'final' it can't be overridden
95  // and we can therefore devirtualize the member function call.
96  if (MD->getParent()->hasAttr<FinalAttr>())
97    return true;
98
99  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
100    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
101      // This is a record decl. We know the type and can devirtualize it.
102      return VD->getType()->isRecordType();
103    }
104
105    return false;
106  }
107
108  // We can always devirtualize calls on temporary object expressions.
109  if (isa<CXXConstructExpr>(Base))
110    return true;
111
112  // And calls on bound temporaries.
113  if (isa<CXXBindTemporaryExpr>(Base))
114    return true;
115
116  // Check if this is a call expr that returns a record type.
117  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
118    return CE->getCallReturnType()->isRecordType();
119
120  // We can't devirtualize the call.
121  return false;
122}
123
// Note: This function also emits constructor calls, to support the MSVC
// extension allowing an explicit constructor function call.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  // A member-pointer call ('obj .* ptr' / 'obj ->* ptr') has a
  // BinaryOperator as its callee and takes a separate path.
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  // Under -flimit-debug-info, force emission of the record type of a
  // pointer base so the debug info contains more than a forward
  // declaration for it.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.  An arrow base is already a pointer;
  // a dot base is an lvalue whose address we take.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  // Trivial special members need no actual call; produce their effect
  // (if any) directly.
  if (MD->isTrivial()) {
    // Trivial destructor / default constructor: no effect at all.
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      // Trivial copy constructor (reachable via the MSVC explicit
      // constructor-call extension): emit as a synthesized copy.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.  Constructors and destructors
  // need the complete-object variant of their CGFunctionInfo.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                           Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                   && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                          ME->getBase(), MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    // Explicit constructor call: always a direct call to the
    // complete-object constructor.
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
      Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    // Direct call.  Under -fapple-kext even a qualified virtual call must
    // still be routed through the vtable (runtime vtable patching).
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), This, Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
231
232RValue
233CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
234                                              ReturnValueSlot ReturnValue) {
235  const BinaryOperator *BO =
236      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
237  const Expr *BaseExpr = BO->getLHS();
238  const Expr *MemFnExpr = BO->getRHS();
239
240  const MemberPointerType *MPT =
241    MemFnExpr->getType()->getAs<MemberPointerType>();
242
243  const FunctionProtoType *FPT =
244    MPT->getPointeeType()->getAs<FunctionProtoType>();
245  const CXXRecordDecl *RD =
246    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
247
248  // Get the member function pointer.
249  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
250
251  // Emit the 'this' pointer.
252  llvm::Value *This;
253
254  if (BO->getOpcode() == BO_PtrMemI)
255    This = EmitScalarExpr(BaseExpr);
256  else
257    This = EmitLValue(BaseExpr).getAddress();
258
259  // Ask the ABI to load the callee.  Note that This is modified.
260  llvm::Value *Callee =
261    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(CGF, This, MemFnPtr, MPT);
262
263  CallArgList Args;
264
265  QualType ThisType =
266    getContext().getPointerType(getContext().getTagDeclType(RD));
267
268  // Push the this ptr.
269  Args.push_back(std::make_pair(RValue::get(This), ThisType));
270
271  // And the rest of the call args
272  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
273  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
274  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
275                  ReturnValue, Args);
276}
277
/// EmitCXXOperatorMemberCallExpr - Emit a call to an overloaded operator
/// implemented as a non-static member function.  Arg 0 of \p E is the
/// implicit object argument; the remaining args are the operator's
/// parameters.
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  // The object expression is the operator's first argument.
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      // A trivial copy-assignment is just a memberwise copy; emit it as an
      // aggregate copy instead of an actual call.
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  // Use a virtual call unless the call can be devirtualized (see
  // canDevirtualizeMemberFunctionCalls above).
  llvm::Value *Callee;
  if (MD->isVirtual() &&
      !canDevirtualizeMemberFunctionCalls(getContext(),
                                           E->getArg(0), MD))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  // Skip arg 0: 'this' was already computed from it above.
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}
314
/// EmitCXXConstructExpr - Emit the object construction described by \p E
/// into the destination slot \p Dest, handling zero-initialization,
/// trivial default construction, copy elision, and array construction.
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      // Evaluate the source expression directly into the destination.
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    // Array of objects: construct each element in a loop, through a
    // pointer to the base element type.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    // Single object: pick the complete/base variant and whether this
    // constructs a virtual base subobject.
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}
367
368void
369CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
370                                            llvm::Value *Src,
371                                            const Expr *Exp) {
372  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
373    Exp = E->getSubExpr();
374  assert(isa<CXXConstructExpr>(Exp) &&
375         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
376  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
377  const CXXConstructorDecl *CD = E->getConstructor();
378  RunCleanupsScope Scope(*this);
379
380  // If we require zero initialization before (or instead of) calling the
381  // constructor, as can be the case with a non-user-provided default
382  // constructor, emit the zero initialization now.
383  // FIXME. Do I still need this for a copy ctor synthesis?
384  if (E->requiresZeroInitialization())
385    EmitNullInitialization(Dest, E->getType());
386
387  assert(!getContext().getAsConstantArrayType(E->getType())
388         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
389  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
390                                 E->arg_begin(), E->arg_end());
391}
392
393/// Check whether the given operator new[] is the global placement
394/// operator new[].
395static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
396                                        const FunctionDecl *Fn) {
397  // Must be in global scope.  Note that allocation functions can't be
398  // declared in namespaces.
399  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
400    return false;
401
402  // Signature must be void *operator new[](size_t, void*).
403  // The size_t is common to all operator new[]s.
404  if (Fn->getNumParams() != 2)
405    return false;
406
407  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
408  return (ParamType == Ctx.VoidPtrTy);
409}
410
411static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
412                                        const CXXNewExpr *E) {
413  if (!E->isArray())
414    return CharUnits::Zero();
415
416  // No cookie is required if the new operator being used is
417  // ::operator new[](size_t, void*).
418  const FunctionDecl *OperatorNew = E->getOperatorNew();
419  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
420    return CharUnits::Zero();
421
422  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
423}
424
425static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
426                                        CodeGenFunction &CGF,
427                                        const CXXNewExpr *E,
428                                        llvm::Value *&NumElements,
429                                        llvm::Value *&SizeWithoutCookie) {
430  QualType ElemType = E->getAllocatedType();
431
432  const llvm::IntegerType *SizeTy =
433    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
434
435  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
436
437  if (!E->isArray()) {
438    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
439    return SizeWithoutCookie;
440  }
441
442  // Figure out the cookie size.
443  CharUnits CookieSize = CalculateCookiePadding(CGF, E);
444
445  // Emit the array size expression.
446  // We multiply the size of all dimensions for NumElements.
447  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
448  NumElements = CGF.EmitScalarExpr(E->getArraySize());
449  assert(NumElements->getType() == SizeTy && "element count not a size_t");
450
451  uint64_t ArraySizeMultiplier = 1;
452  while (const ConstantArrayType *CAT
453             = CGF.getContext().getAsConstantArrayType(ElemType)) {
454    ElemType = CAT->getElementType();
455    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
456  }
457
458  llvm::Value *Size;
459
460  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
461  // Don't bloat the -O0 code.
462  if (llvm::ConstantInt *NumElementsC =
463        dyn_cast<llvm::ConstantInt>(NumElements)) {
464    llvm::APInt NEC = NumElementsC->getValue();
465    unsigned SizeWidth = NEC.getBitWidth();
466
467    // Determine if there is an overflow here by doing an extended multiply.
468    NEC = NEC.zext(SizeWidth*2);
469    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
470    SC *= NEC;
471
472    if (!CookieSize.isZero()) {
473      // Save the current size without a cookie.  We don't care if an
474      // overflow's already happened because SizeWithoutCookie isn't
475      // used if the allocator returns null or throws, as it should
476      // always do on an overflow.
477      llvm::APInt SWC = SC.trunc(SizeWidth);
478      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
479
480      // Add the cookie size.
481      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
482    }
483
484    if (SC.countLeadingZeros() >= SizeWidth) {
485      SC = SC.trunc(SizeWidth);
486      Size = llvm::ConstantInt::get(SizeTy, SC);
487    } else {
488      // On overflow, produce a -1 so operator new throws.
489      Size = llvm::Constant::getAllOnesValue(SizeTy);
490    }
491
492    // Scale NumElements while we're at it.
493    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
494    NumElements = llvm::ConstantInt::get(SizeTy, N);
495
496  // Otherwise, we don't need to do an overflow-checked multiplication if
497  // we're multiplying by one.
498  } else if (TypeSize.isOne()) {
499    assert(ArraySizeMultiplier == 1);
500
501    Size = NumElements;
502
503    // If we need a cookie, add its size in with an overflow check.
504    // This is maybe a little paranoid.
505    if (!CookieSize.isZero()) {
506      SizeWithoutCookie = Size;
507
508      llvm::Value *CookieSizeV
509        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
510
511      const llvm::Type *Types[] = { SizeTy };
512      llvm::Value *UAddF
513        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
514      llvm::Value *AddRes
515        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
516
517      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
518      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
519      Size = CGF.Builder.CreateSelect(DidOverflow,
520                                      llvm::ConstantInt::get(SizeTy, -1),
521                                      Size);
522    }
523
524  // Otherwise use the int.umul.with.overflow intrinsic.
525  } else {
526    llvm::Value *OutermostElementSize
527      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
528
529    llvm::Value *NumOutermostElements = NumElements;
530
531    // Scale NumElements by the array size multiplier.  This might
532    // overflow, but only if the multiplication below also overflows,
533    // in which case this multiplication isn't used.
534    if (ArraySizeMultiplier != 1)
535      NumElements = CGF.Builder.CreateMul(NumElements,
536                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
537
538    // The requested size of the outermost array is non-constant.
539    // Multiply that by the static size of the elements of that array;
540    // on unsigned overflow, set the size to -1 to trigger an
541    // exception from the allocation routine.  This is sufficient to
542    // prevent buffer overruns from the allocator returning a
543    // seemingly valid pointer to insufficient space.  This idea comes
544    // originally from MSVC, and GCC has an open bug requesting
545    // similar behavior:
546    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
547    //
548    // This will not be sufficient for C++0x, which requires a
549    // specific exception class (std::bad_array_new_length).
550    // That will require ABI support that has not yet been specified.
551    const llvm::Type *Types[] = { SizeTy };
552    llvm::Value *UMulF
553      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
554    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
555                                                  OutermostElementSize);
556
557    // The overflow bit.
558    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
559
560    // The result of the multiplication.
561    Size = CGF.Builder.CreateExtractValue(MulRes, 0);
562
563    // If we have a cookie, we need to add that size in, too.
564    if (!CookieSize.isZero()) {
565      SizeWithoutCookie = Size;
566
567      llvm::Value *CookieSizeV
568        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
569      llvm::Value *UAddF
570        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
571      llvm::Value *AddRes
572        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
573
574      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
575
576      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
577      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
578    }
579
580    Size = CGF.Builder.CreateSelect(DidOverflow,
581                                    llvm::ConstantInt::get(SizeTy, -1),
582                                    Size);
583  }
584
585  if (CookieSize.isZero())
586    SizeWithoutCookie = Size;
587  else
588    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
589
590  return Size;
591}
592
593static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
594                                    llvm::Value *NewPtr) {
595
596  assert(E->getNumConstructorArgs() == 1 &&
597         "Can only have one argument to initializer of POD type.");
598
599  const Expr *Init = E->getConstructorArg(0);
600  QualType AllocType = E->getAllocatedType();
601
602  unsigned Alignment =
603    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
604  if (!CGF.hasAggregateLLVMType(AllocType))
605    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
606                          AllocType.isVolatileQualified(), Alignment,
607                          AllocType);
608  else if (AllocType->isAnyComplexType())
609    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
610                                AllocType.isVolatileQualified());
611  else {
612    AggValueSlot Slot
613      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
614    CGF.EmitAggExpr(Init, Slot);
615  }
616}
617
/// EmitNewArrayInitializer - Initialize the elements of an array allocated
/// by new[] by emitting a loop that stores the single POD initializer into
/// each of \p NumElements elements starting at \p NewPtr.
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.  No initializer args means no initialization.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the initializer store for the array element
  // at the current index.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
671
672static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
673                           llvm::Value *NewPtr, llvm::Value *Size) {
674  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
675  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
676  if (NewPtr->getType() != BP)
677    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
678
679  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
680  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
681                           Alignment.getQuantity(), false);
682}
683
/// EmitNewInitializer - Emit the initialization of storage produced by a
/// new-expression: constructor calls, value-initialization (possibly as a
/// single memset), or a direct store of a POD initializer.
/// \p AllocSizeWithoutCookie is the allocation size in bytes excluding any
/// array cookie, which is what a zeroing memset must cover.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        // Trivial ctor but type not zero-initializable by memset: ask the
        // per-element loop below to zero each element first.
        RequiresZeroInitialization = true;
      }

      // Non-trivial (or zero-init-requiring) array construction: emit the
      // constructor call loop over all elements.
      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // POD array with an explicit initializer: store it into each element
      // in an emitted loop.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.  No initializer args means no initialization.
  if (E->getNumConstructorArgs() == 0)
    return;

  // Single POD object with exactly one initializer expression.
  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
747
748namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.  The placement arguments are
  /// stored as RValues in variable-sized trailing storage allocated
  /// immediately after this object by the EHScopeStack (see getExtraSize
  /// and getPlacementArgs).
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;          // count of trailing placement RValues
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;                 // pointer returned by 'operator new'
    llvm::Value *AllocSize;           // requested allocation size

    // The placement arguments live directly after this object in memory.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Extra bytes the EHScopeStack must reserve after this cleanup for
    /// the trailing placement-argument array.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store placement argument \p I into the trailing storage.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    // Emit the call to 'operator delete', rebuilding the argument list that
    // was originally passed to the matching 'operator new'.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
802
803  /// A cleanup to call the given 'operator delete' function upon
804  /// abnormal exit from a new expression when the new expression is
805  /// conditional.
806  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
807    size_t NumPlacementArgs;
808    const FunctionDecl *OperatorDelete;
809    DominatingValue<RValue>::saved_type Ptr;
810    DominatingValue<RValue>::saved_type AllocSize;
811
812    DominatingValue<RValue>::saved_type *getPlacementArgs() {
813      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
814    }
815
816  public:
817    static size_t getExtraSize(size_t NumPlacementArgs) {
818      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
819    }
820
821    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
822                                   const FunctionDecl *OperatorDelete,
823                                   DominatingValue<RValue>::saved_type Ptr,
824                              DominatingValue<RValue>::saved_type AllocSize)
825      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
826        Ptr(Ptr), AllocSize(AllocSize) {}
827
828    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
829      assert(I < NumPlacementArgs && "index out of range");
830      getPlacementArgs()[I] = Arg;
831    }
832
833    void Emit(CodeGenFunction &CGF, bool IsForEH) {
834      const FunctionProtoType *FPT
835        = OperatorDelete->getType()->getAs<FunctionProtoType>();
836      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
837             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
838
839      CallArgList DeleteArgs;
840
841      // The first argument is always a void*.
842      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
843      DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
844
845      // A member 'operator delete' can take an extra 'size_t' argument.
846      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
847        RValue RV = AllocSize.restore(CGF);
848        DeleteArgs.push_back(std::make_pair(RV, *AI++));
849      }
850
851      // Pass the rest of the arguments, which must match exactly.
852      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
853        RValue RV = getPlacementArgs()[I].restore(CGF);
854        DeleteArgs.push_back(std::make_pair(RV, *AI++));
855      }
856
857      // Call 'operator delete'.
858      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
859                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
860                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
861    }
862  };
863}
864
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
///
/// \param NewPtr - the pointer returned by 'operator new'
/// \param AllocSize - the size that was passed to 'operator new'
/// \param NewArgs - the full argument list of the 'operator new' call;
///   entry 0 is the allocation size, entries 1..N the placement args
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    // NewArgs[0] is the allocation size, so placement arg I is at I+1.
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].first);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  // Push the cleanup inactive, fill in the saved placement args, and
  // only then activate it (see ActivateCleanupBlock below).
  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}
904
905llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
906  QualType AllocType = E->getAllocatedType();
907  if (AllocType->isArrayType())
908    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
909      AllocType = AType->getElementType();
910
911  FunctionDecl *NewFD = E->getOperatorNew();
912  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
913
914  CallArgList NewArgs;
915
916  // The allocation size is the first argument.
917  QualType SizeTy = getContext().getSizeType();
918
919  llvm::Value *NumElements = 0;
920  llvm::Value *AllocSizeWithoutCookie = 0;
921  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
922                                               *this, E, NumElements,
923                                               AllocSizeWithoutCookie);
924
925  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
926
927  // Emit the rest of the arguments.
928  // FIXME: Ideally, this should just use EmitCallArgs.
929  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
930
931  // First, use the types from the function type.
932  // We start at 1 here because the first argument (the allocation size)
933  // has already been emitted.
934  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
935    QualType ArgType = NewFTy->getArgType(i);
936
937    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
938           getTypePtr() ==
939           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
940           "type mismatch in call argument!");
941
942    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
943                                     ArgType));
944
945  }
946
947  // Either we've emitted all the call args, or we have a call to a
948  // variadic function.
949  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
950         "Extra arguments in non-variadic function!");
951
952  // If we still have any arguments, emit them using the type of the argument.
953  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
954       NewArg != NewArgEnd; ++NewArg) {
955    QualType ArgType = NewArg->getType();
956    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
957                                     ArgType));
958  }
959
960  // Emit the call to new.
961  RValue RV =
962    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
963             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
964
965  // If an allocation function is declared with an empty exception specification
966  // it returns null to indicate failure to allocate storage. [expr.new]p13.
967  // (We don't need to check for null when there's no new initializer and
968  // we're allocating a POD type).
969  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
970    !(AllocType->isPODType() && !E->hasInitializer());
971
972  llvm::BasicBlock *NullCheckSource = 0;
973  llvm::BasicBlock *NewNotNull = 0;
974  llvm::BasicBlock *NewEnd = 0;
975
976  llvm::Value *NewPtr = RV.getScalarVal();
977  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
978
979  if (NullCheckResult) {
980    NullCheckSource = Builder.GetInsertBlock();
981    NewNotNull = createBasicBlock("new.notnull");
982    NewEnd = createBasicBlock("new.end");
983
984    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
985    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
986    EmitBlock(NewNotNull);
987  }
988
989  assert((AllocSize == AllocSizeWithoutCookie) ==
990         CalculateCookiePadding(*this, E).isZero());
991  if (AllocSize != AllocSizeWithoutCookie) {
992    assert(E->isArray());
993    NewPtr = CGM.getCXXABI().InitializeArrayCookie(CGF, NewPtr, NumElements,
994                                                   E, AllocType);
995  }
996
997  // If there's an operator delete, enter a cleanup to call it if an
998  // exception is thrown.
999  EHScopeStack::stable_iterator CallOperatorDelete;
1000  if (E->getOperatorDelete()) {
1001    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
1002    CallOperatorDelete = EHStack.stable_begin();
1003  }
1004
1005  const llvm::Type *ElementPtrTy
1006    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
1007  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
1008
1009  if (E->isArray()) {
1010    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1011
1012    // NewPtr is a pointer to the base element type.  If we're
1013    // allocating an array of arrays, we'll need to cast back to the
1014    // array pointer type.
1015    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
1016    if (NewPtr->getType() != ResultTy)
1017      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
1018  } else {
1019    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1020  }
1021
1022  // Deactivate the 'operator delete' cleanup if we finished
1023  // initialization.
1024  if (CallOperatorDelete.isValid())
1025    DeactivateCleanupBlock(CallOperatorDelete);
1026
1027  if (NullCheckResult) {
1028    Builder.CreateBr(NewEnd);
1029    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
1030    EmitBlock(NewEnd);
1031
1032    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
1033    PHI->reserveOperandSpace(2);
1034    PHI->addIncoming(NewPtr, NotNullSource);
1035    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
1036                     NullCheckSource);
1037
1038    NewPtr = PHI;
1039  }
1040
1041  return NewPtr;
1042}
1043
1044void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1045                                     llvm::Value *Ptr,
1046                                     QualType DeleteTy) {
1047  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1048
1049  const FunctionProtoType *DeleteFTy =
1050    DeleteFD->getType()->getAs<FunctionProtoType>();
1051
1052  CallArgList DeleteArgs;
1053
1054  // Check if we need to pass the size to the delete operator.
1055  llvm::Value *Size = 0;
1056  QualType SizeTy;
1057  if (DeleteFTy->getNumArgs() == 2) {
1058    SizeTy = DeleteFTy->getArgType(1);
1059    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1060    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1061                                  DeleteTypeSize.getQuantity());
1062  }
1063
1064  QualType ArgTy = DeleteFTy->getArgType(0);
1065  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1066  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1067
1068  if (Size)
1069    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1070
1071  // Emit the call to delete.
1072  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1073           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1074           DeleteArgs, DeleteFD);
1075}
1076
namespace {
  /// Calls the given 'operator delete' on a single object.
  ///
  /// Pushed as a cleanup around the destructor call (see EmitObjectDelete
  /// below) so the storage is still released if the destructor throws.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                     // the object being deleted
    const FunctionDecl *OperatorDelete;   // the 'operator delete' to call
    QualType ElementType;                 // static type, for the size arg

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1094
/// Emit the code for deleting a single object: run its destructor (if
/// any), then call 'operator delete'.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        // Virtual call to the *deleting* destructor, which handles both
        // destruction and deallocation itself.
        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Popping the cleanup emits the 'operator delete' call on the normal
  // path as well.
  CGF.PopCleanupBlock();
}
1137
1138namespace {
1139  /// Calls the given 'operator delete' on an array of objects.
1140  struct CallArrayDelete : EHScopeStack::Cleanup {
1141    llvm::Value *Ptr;
1142    const FunctionDecl *OperatorDelete;
1143    llvm::Value *NumElements;
1144    QualType ElementType;
1145    CharUnits CookieSize;
1146
1147    CallArrayDelete(llvm::Value *Ptr,
1148                    const FunctionDecl *OperatorDelete,
1149                    llvm::Value *NumElements,
1150                    QualType ElementType,
1151                    CharUnits CookieSize)
1152      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1153        ElementType(ElementType), CookieSize(CookieSize) {}
1154
1155    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1156      const FunctionProtoType *DeleteFTy =
1157        OperatorDelete->getType()->getAs<FunctionProtoType>();
1158      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1159
1160      CallArgList Args;
1161
1162      // Pass the pointer as the first argument.
1163      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1164      llvm::Value *DeletePtr
1165        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1166      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1167
1168      // Pass the original requested size as the second argument.
1169      if (DeleteFTy->getNumArgs() == 2) {
1170        QualType size_t = DeleteFTy->getArgType(1);
1171        const llvm::IntegerType *SizeTy
1172          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1173
1174        CharUnits ElementTypeSize =
1175          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1176
1177        // The size of an element, multiplied by the number of elements.
1178        llvm::Value *Size
1179          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1180        Size = CGF.Builder.CreateMul(Size, NumElements);
1181
1182        // Plus the size of the cookie if applicable.
1183        if (!CookieSize.isZero()) {
1184          llvm::Value *CookieSizeV
1185            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1186          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1187        }
1188
1189        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1190      }
1191
1192      // Emit the call to delete.
1193      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1194                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1195                   ReturnValueSlot(), Args, OperatorDelete);
1196    }
1197  };
1198}
1199
1200/// Emit the code for deleting an array of objects.
1201static void EmitArrayDelete(CodeGenFunction &CGF,
1202                            const CXXDeleteExpr *E,
1203                            llvm::Value *Ptr,
1204                            QualType ElementType) {
1205  llvm::Value *NumElements = 0;
1206  llvm::Value *AllocatedPtr = 0;
1207  CharUnits CookieSize;
1208  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1209                                      NumElements, AllocatedPtr, CookieSize);
1210
1211  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1212
1213  // Make sure that we call delete even if one of the dtors throws.
1214  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1215  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1216                                           AllocatedPtr, OperatorDelete,
1217                                           NumElements, ElementType,
1218                                           CookieSize);
1219
1220  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1221    if (!RD->hasTrivialDestructor()) {
1222      assert(NumElements && "ReadArrayCookie didn't find element count"
1223                            " for a class with destructor");
1224      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1225    }
1226  }
1227
1228  CGF.PopCleanupBlock();
1229}
1230
/// Emit a delete-expression: null-check the operand, flatten any
/// constant-array layers, and dispatch to the scalar or array form.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    // Only strip implicit casts that yield a void pointer; a
    // user-defined conversion must actually be evaluated.
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer skips straight to
  // the end block.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // By now Ptr should point directly at an object of DeleteTy.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1291
/// Emit a typeid expression, yielding a pointer to the std::type_info
/// object for the operand.
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(type) is a constant: just the RTTI descriptor for that type.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  // typeid(expr): strip qualifiers and reference-ness off the operand type.
  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // For a polymorphic class the result is dynamic: load it through
      // the object's vtable.
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid (does not return).
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      // Load the type_info pointer stored at vtable slot -1 (as laid out
      // by the Itanium C++ ABI).
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic operand: the result is the static type's descriptor.
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1343
/// Emit a dynamic_cast of the value V according to the cast expression.
///
/// Pointer casts may take and yield null; dynamic_cast<void*> only
/// adjusts to the most-derived object; reference casts call
/// __cxa_bad_cast on failure instead of producing null.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  // Classify the cast (see function comment).
  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  // Strip pointer/reference-ness and qualifiers to get at the record types.
  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  // For a nullable source, branch around the cast when the input is null.
  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*)
  if (ToVoid) {
    // Adjust to the most-derived object using the offset-to-top value
    // stored at vtable slot -2 (Itanium C++ ABI); no runtime call needed.
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast(sub, src_type_info, dst_type_info, hint)
    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *PtrToInt8Ty
      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, PtrToInt8Ty);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    // For reference casts, a null result means the cast failed; call
    // __cxa_bad_cast instead of returning.
    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(VMContext);
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  // Merge the null and non-null paths, producing either the cast result
  // or a null of the destination type.
  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1464