CGExprCXX.cpp revision a7f633f522af786e80dc08dbd63e222c9414095b
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
23RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
24                                          llvm::Value *Callee,
25                                          ReturnValueSlot ReturnValue,
26                                          llvm::Value *This,
27                                          llvm::Value *VTT,
28                                          CallExpr::const_arg_iterator ArgBeg,
29                                          CallExpr::const_arg_iterator ArgEnd) {
30  assert(MD->isInstance() &&
31         "Trying to emit a member call expr on a static method!");
32
33  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
34
35  CallArgList Args;
36
37  // Push the this ptr.
38  Args.push_back(std::make_pair(RValue::get(This),
39                                MD->getThisType(getContext())));
40
41  // If there is a VTT parameter, emit it.
42  if (VTT) {
43    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44    Args.push_back(std::make_pair(RValue::get(VTT), T));
45  }
46
47  // And the rest of the call args
48  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
49
50  QualType ResultType = FPT->getResultType();
51  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
52                                                 FPT->getExtInfo()),
53                  Callee, ReturnValue, Args, MD);
54}
55
56static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
57  const Expr *E = Base;
58
59  while (true) {
60    E = E->IgnoreParens();
61    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
62      if (CE->getCastKind() == CK_DerivedToBase ||
63          CE->getCastKind() == CK_UncheckedDerivedToBase ||
64          CE->getCastKind() == CK_NoOp) {
65        E = CE->getSubExpr();
66        continue;
67      }
68    }
69
70    break;
71  }
72
73  QualType DerivedType = E->getType();
74  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
75    DerivedType = PTy->getPointeeType();
76
77  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
78}
79
80/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
81/// expr can be devirtualized.
82static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
83                                               const Expr *Base,
84                                               const CXXMethodDecl *MD) {
85
86  // When building with -fapple-kext, all calls must go through the vtable since
87  // the kernel linker can do runtime patching of vtables.
88  if (Context.getLangOptions().AppleKext)
89    return false;
90
91  // If the most derived class is marked final, we know that no subclass can
92  // override this member function and so we can devirtualize it. For example:
93  //
94  // struct A { virtual void f(); }
95  // struct B final : A { };
96  //
97  // void f(B *b) {
98  //   b->f();
99  // }
100  //
101  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
102  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
103    return true;
104
105  // If the member function is marked 'final', we know that it can't be
106  // overridden and can therefore devirtualize it.
107  if (MD->hasAttr<FinalAttr>())
108    return true;
109
110  // Similarly, if the class itself is marked 'final' it can't be overridden
111  // and we can therefore devirtualize the member function call.
112  if (MD->getParent()->hasAttr<FinalAttr>())
113    return true;
114
115  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
116    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
117      // This is a record decl. We know the type and can devirtualize it.
118      return VD->getType()->isRecordType();
119    }
120
121    return false;
122  }
123
124  // We can always devirtualize calls on temporary object expressions.
125  if (isa<CXXConstructExpr>(Base))
126    return true;
127
128  // And calls on bound temporaries.
129  if (isa<CXXBindTemporaryExpr>(Base))
130    return true;
131
132  // Check if this is a call expr that returns a record type.
133  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
134    return CE->getCallReturnType()->isRecordType();
135
136  // We can't devirtualize the call.
137  return false;
138}
139
// Note: This function also emits constructor calls, to support the MSVC
// extension allowing an explicit constructor function call.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  // A call through a pointer to member (.* or ->*) has a BinaryOperator as
  // its callee and takes a separate path.
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  // Under -flimit-debug-info, force emission of the full record type of the
  // base object here, since members are being accessed through it.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer: an arrow base is already a pointer; a dot
  // base needs its address taken.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  // Trivial special member functions need no real call; emit their effect
  // (if any) inline.
  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      // Trivial copy constructor (MSVC explicit-ctor-call extension path):
      // synthesize the copy directly.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling; destructors and constructors
  // use their complete-object variants.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                           Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                   && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                          ME->getBase(), MD);
  // Select the callee: virtual dispatch, kext vtable dispatch (qualified
  // virtual calls under -fapple-kext must still go through the vtable), or
  // a direct function address.
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
      Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
252
/// Emit a call whose callee is a pointer-to-member-function expression,
/// i.e. a '.*' or '->*' BinaryOperator.
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer: for '->*' the LHS is already a pointer, for
  // '.*' we take the address of the LHS lvalue.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}
298
299RValue
300CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
301                                               const CXXMethodDecl *MD,
302                                               ReturnValueSlot ReturnValue) {
303  assert(MD->isInstance() &&
304         "Trying to emit a member call expr on a static method!");
305  LValue LV = EmitLValue(E->getArg(0));
306  llvm::Value *This = LV.getAddress();
307
308  if (MD->isCopyAssignmentOperator()) {
309    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
310    if (ClassDecl->hasTrivialCopyAssignment()) {
311      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
312             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
313      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
314      QualType Ty = E->getType();
315      EmitAggregateCopy(This, Src, Ty);
316      return RValue::get(This);
317    }
318  }
319
320  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
321  const llvm::Type *Ty =
322    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
323                                   FPT->isVariadic());
324  llvm::Value *Callee;
325  if (MD->isVirtual() &&
326      !canDevirtualizeMemberFunctionCalls(getContext(),
327                                           E->getArg(0), MD))
328    Callee = BuildVirtualCall(MD, This, Ty);
329  else
330    Callee = CGM.GetAddrOfFunction(MD, Ty);
331
332  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
333                           E->arg_begin() + 1, E->arg_end());
334}
335
336void
337CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
338                                      AggValueSlot Dest) {
339  assert(!Dest.isIgnored() && "Must have a destination!");
340  const CXXConstructorDecl *CD = E->getConstructor();
341
342  // If we require zero initialization before (or instead of) calling the
343  // constructor, as can be the case with a non-user-provided default
344  // constructor, emit the zero initialization now.
345  if (E->requiresZeroInitialization())
346    EmitNullInitialization(Dest.getAddr(), E->getType());
347
348  // If this is a call to a trivial default constructor, do nothing.
349  if (CD->isTrivial() && CD->isDefaultConstructor())
350    return;
351
352  // Elide the constructor if we're constructing from a temporary.
353  // The temporary check is required because Sema sets this on NRVO
354  // returns.
355  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
356    assert(getContext().hasSameUnqualifiedType(E->getType(),
357                                               E->getArg(0)->getType()));
358    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
359      EmitAggExpr(E->getArg(0), Dest);
360      return;
361    }
362  }
363
364  const ConstantArrayType *Array
365    = getContext().getAsConstantArrayType(E->getType());
366  if (Array) {
367    QualType BaseElementTy = getContext().getBaseElementType(Array);
368    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
369    BasePtr = llvm::PointerType::getUnqual(BasePtr);
370    llvm::Value *BaseAddrPtr =
371      Builder.CreateBitCast(Dest.getAddr(), BasePtr);
372
373    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
374                               E->arg_begin(), E->arg_end());
375  }
376  else {
377    CXXCtorType Type =
378      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
379      ? Ctor_Complete : Ctor_Base;
380    bool ForVirtualBase =
381      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
382
383    // Call the constructor.
384    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
385                           E->arg_begin(), E->arg_end());
386  }
387}
388
389void
390CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
391                                            llvm::Value *Src,
392                                            const Expr *Exp) {
393  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
394    Exp = E->getSubExpr();
395  assert(isa<CXXConstructExpr>(Exp) &&
396         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
397  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
398  const CXXConstructorDecl *CD = E->getConstructor();
399  RunCleanupsScope Scope(*this);
400
401  // If we require zero initialization before (or instead of) calling the
402  // constructor, as can be the case with a non-user-provided default
403  // constructor, emit the zero initialization now.
404  // FIXME. Do I still need this for a copy ctor synthesis?
405  if (E->requiresZeroInitialization())
406    EmitNullInitialization(Dest, E->getType());
407
408  assert(!getContext().getAsConstantArrayType(E->getType())
409         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
410  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
411                                 E->arg_begin(), E->arg_end());
412}
413
414/// Check whether the given operator new[] is the global placement
415/// operator new[].
416static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
417                                        const FunctionDecl *Fn) {
418  // Must be in global scope.  Note that allocation functions can't be
419  // declared in namespaces.
420  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
421    return false;
422
423  // Signature must be void *operator new[](size_t, void*).
424  // The size_t is common to all operator new[]s.
425  if (Fn->getNumParams() != 2)
426    return false;
427
428  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
429  return (ParamType == Ctx.VoidPtrTy);
430}
431
432static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
433                                        const CXXNewExpr *E) {
434  if (!E->isArray())
435    return CharUnits::Zero();
436
437  // No cookie is required if the new operator being used is
438  // ::operator new[](size_t, void*).
439  const FunctionDecl *OperatorNew = E->getOperatorNew();
440  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
441    return CharUnits::Zero();
442
443  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
444}
445
/// EmitCXXNewAllocSize - Compute the number of bytes to request from the
/// allocation function for the given new-expression.  For array news,
/// \p NumElements is set to the total element count and \p SizeWithoutCookie
/// to the allocation size excluding any array cookie; on unsigned overflow
/// the returned size is all-ones so the allocator will fail.  For non-array
/// news only \p SizeWithoutCookie is set.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  // A non-array new allocates exactly one object: no cookie, no overflow
  // checking needed.
  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Collapse constant inner array dimensions into a single multiplier,
  // reducing ElemType to the base element type.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // The double-width product fits in size_t iff its top half is all zero.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}
613
614static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
615                                    llvm::Value *NewPtr) {
616
617  assert(E->getNumConstructorArgs() == 1 &&
618         "Can only have one argument to initializer of POD type.");
619
620  const Expr *Init = E->getConstructorArg(0);
621  QualType AllocType = E->getAllocatedType();
622
623  unsigned Alignment =
624    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
625  if (!CGF.hasAggregateLLVMType(AllocType))
626    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
627                          AllocType.isVolatileQualified(), Alignment,
628                          AllocType);
629  else if (AllocType->isAnyComplexType())
630    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
631                                AllocType.isVolatileQualified());
632  else {
633    AggValueSlot Slot
634      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
635    CGF.EmitAggExpr(Init, Slot);
636  }
637}
638
/// Emit the initializer for a new'd array without a constructor by looping
/// over the NumElements elements and storing the initializer expression
/// into each one in turn.
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type with no initializer: nothing to emit.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, store the initializer into the current element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
692
693static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
694                           llvm::Value *NewPtr, llvm::Value *Size) {
695  CGF.EmitCastToVoidPtr(NewPtr);
696  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
697  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
698                           Alignment.getQuantity(), false);
699}
700
/// Emit the initialization of the storage produced by a new-expression,
/// choosing between constructor calls, a single memset for zero
/// initialization, an element-by-element loop, or a direct store.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        // The type is not zero-initializable by memset (e.g. needs pointers
        // to data members set to their null representation); let the
        // aggregate constructor-call path zero it per element.
        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // General case: loop over the elements, storing the initializer into
      // each one.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  // Single POD object with a single initializer expression.
  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
764
765namespace {
766  /// A cleanup to call the given 'operator delete' function upon
767  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;            // number of saved placement arguments
    const FunctionDecl *OperatorDelete; // the 'operator delete' to call
    llvm::Value *Ptr;                   // the allocation to free
    llvm::Value *AllocSize;             // implicit size_t arg, if delete takes one

    // The placement arguments live in variable-size trailing storage that
    // the cleanup stack allocates immediately after this object (the amount
    // is requested via getExtraSize below).
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    // Extra bytes of trailing storage to request from the cleanup stack:
    // one RValue slot per placement argument.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    // Record the I'th placement argument in the trailing storage.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    // Emit the call to 'operator delete': the pointer, then the implicit
    // size_t (only if the delete's signature takes it), then the saved
    // placement arguments.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
819
820  /// A cleanup to call the given 'operator delete' function upon
821  /// abnormal exit from a new expression when the new expression is
822  /// conditional.
823  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
824    size_t NumPlacementArgs;
825    const FunctionDecl *OperatorDelete;
826    DominatingValue<RValue>::saved_type Ptr;
827    DominatingValue<RValue>::saved_type AllocSize;
828
829    DominatingValue<RValue>::saved_type *getPlacementArgs() {
830      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
831    }
832
833  public:
834    static size_t getExtraSize(size_t NumPlacementArgs) {
835      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
836    }
837
838    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
839                                   const FunctionDecl *OperatorDelete,
840                                   DominatingValue<RValue>::saved_type Ptr,
841                              DominatingValue<RValue>::saved_type AllocSize)
842      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
843        Ptr(Ptr), AllocSize(AllocSize) {}
844
845    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
846      assert(I < NumPlacementArgs && "index out of range");
847      getPlacementArgs()[I] = Arg;
848    }
849
850    void Emit(CodeGenFunction &CGF, bool IsForEH) {
851      const FunctionProtoType *FPT
852        = OperatorDelete->getType()->getAs<FunctionProtoType>();
853      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
854             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
855
856      CallArgList DeleteArgs;
857
858      // The first argument is always a void*.
859      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
860      DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
861
862      // A member 'operator delete' can take an extra 'size_t' argument.
863      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
864        RValue RV = AllocSize.restore(CGF);
865        DeleteArgs.push_back(std::make_pair(RV, *AI++));
866      }
867
868      // Pass the rest of the arguments, which must match exactly.
869      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
870        RValue RV = getPlacementArgs()[I].restore(CGF);
871        DeleteArgs.push_back(std::make_pair(RV, *AI++));
872      }
873
874      // Call 'operator delete'.
875      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
876                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
877                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
878    }
879  };
880}
881
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
///
/// \param NewPtr    the pointer returned by 'operator new'
/// \param AllocSize the allocation size passed to 'operator new'
/// \param NewArgs   the complete argument list of the 'operator new'
///                  call; entry 0 is the allocation size, entries 1..N
///                  are the placement arguments to forward to the
///                  matching placement 'operator delete'.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    // I+1 skips the allocation-size argument at NewArgs[0].
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].first);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  // NOTE(review): the cleanup is pushed inactive and then immediately
  // activated via ActivateCleanupBlock, presumably so activation is
  // recorded relative to the current conditional block — confirm
  // against EHScopeStack's conditional-cleanup semantics.
  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  // Save each placement argument (again skipping the size at index 0).
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}
921
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // For array new, strip down to the ultimate element type: that is the
  // type whose objects get constructed.
  QualType AllocType = E->getAllocatedType();
  if (AllocType->isArrayType())
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();

  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception specification
  // it returns null to indicate failure to allocate storage. [expr.new]p13.
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NullCheckSource = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (NullCheckResult) {
    // Branch around the initializer when the allocator returned null.
    NullCheckSource = Builder.GetInsertBlock();
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
    EmitBlock(NewNotNull);

    conditional.begin(*this);
  }

  // A nonzero cookie means this is an array allocation that needs the
  // element count recorded before the elements; the ABI writes the
  // cookie and returns the adjusted pointer.
  assert((AllocSize == AllocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (AllocSize != AllocSizeWithoutCookie) {
    assert(E->isArray());
    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
                                                   E, AllocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator CallOperatorDelete;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
    CallOperatorDelete = EHStack.stable_begin();
  }

  // Cast the raw allocation to a pointer to the element type, keeping
  // the allocator's address space.
  const llvm::Type *ElementPtrTy
    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
    if (NewPtr->getType() != ResultTy)
      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
  } else {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (CallOperatorDelete.isValid())
    DeactivateCleanupBlock(CallOperatorDelete);

  if (NullCheckResult) {
    conditional.end(*this);

    Builder.CreateBr(NewEnd);
    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
    EmitBlock(NewEnd);

    // Merge the two paths: null from the allocator, or the (possibly
    // cookie-adjusted, bitcast) pointer from the initialized path.
    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NotNullSource);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
                     NullCheckSource);

    NewPtr = PHI;
  }

  return NewPtr;
}
1068
1069void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1070                                     llvm::Value *Ptr,
1071                                     QualType DeleteTy) {
1072  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1073
1074  const FunctionProtoType *DeleteFTy =
1075    DeleteFD->getType()->getAs<FunctionProtoType>();
1076
1077  CallArgList DeleteArgs;
1078
1079  // Check if we need to pass the size to the delete operator.
1080  llvm::Value *Size = 0;
1081  QualType SizeTy;
1082  if (DeleteFTy->getNumArgs() == 2) {
1083    SizeTy = DeleteFTy->getArgType(1);
1084    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1085    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1086                                  DeleteTypeSize.getQuantity());
1087  }
1088
1089  QualType ArgTy = DeleteFTy->getArgType(0);
1090  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1091  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1092
1093  if (Size)
1094    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1095
1096  // Emit the call to delete.
1097  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1098           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1099           DeleteArgs, DeleteFD);
1100}
1101
1102namespace {
1103  /// Calls the given 'operator delete' on a single object.
1104  struct CallObjectDelete : EHScopeStack::Cleanup {
1105    llvm::Value *Ptr;
1106    const FunctionDecl *OperatorDelete;
1107    QualType ElementType;
1108
1109    CallObjectDelete(llvm::Value *Ptr,
1110                     const FunctionDecl *OperatorDelete,
1111                     QualType ElementType)
1112      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1113
1114    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1115      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1116    }
1117  };
1118}
1119
/// Emit the code for deleting a single object.
///
/// \param Ptr a non-null pointer to the object (the caller has already
///        emitted the null check).
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        // Note the function type is built from the complete destructor,
        // but the virtual call fetches the *deleting* destructor, which
        // is responsible for calling 'operator delete' itself.
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Popping the cleanup emits the 'operator delete' call on the normal
  // path as well as registering it for the EH path above.
  CGF.PopCleanupBlock();
}
1162
1163namespace {
1164  /// Calls the given 'operator delete' on an array of objects.
1165  struct CallArrayDelete : EHScopeStack::Cleanup {
1166    llvm::Value *Ptr;
1167    const FunctionDecl *OperatorDelete;
1168    llvm::Value *NumElements;
1169    QualType ElementType;
1170    CharUnits CookieSize;
1171
1172    CallArrayDelete(llvm::Value *Ptr,
1173                    const FunctionDecl *OperatorDelete,
1174                    llvm::Value *NumElements,
1175                    QualType ElementType,
1176                    CharUnits CookieSize)
1177      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1178        ElementType(ElementType), CookieSize(CookieSize) {}
1179
1180    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1181      const FunctionProtoType *DeleteFTy =
1182        OperatorDelete->getType()->getAs<FunctionProtoType>();
1183      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1184
1185      CallArgList Args;
1186
1187      // Pass the pointer as the first argument.
1188      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1189      llvm::Value *DeletePtr
1190        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1191      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1192
1193      // Pass the original requested size as the second argument.
1194      if (DeleteFTy->getNumArgs() == 2) {
1195        QualType size_t = DeleteFTy->getArgType(1);
1196        const llvm::IntegerType *SizeTy
1197          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1198
1199        CharUnits ElementTypeSize =
1200          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1201
1202        // The size of an element, multiplied by the number of elements.
1203        llvm::Value *Size
1204          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1205        Size = CGF.Builder.CreateMul(Size, NumElements);
1206
1207        // Plus the size of the cookie if applicable.
1208        if (!CookieSize.isZero()) {
1209          llvm::Value *CookieSizeV
1210            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1211          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1212        }
1213
1214        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1215      }
1216
1217      // Emit the call to delete.
1218      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1219                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1220                   ReturnValueSlot(), Args, OperatorDelete);
1221    }
1222  };
1223}
1224
1225/// Emit the code for deleting an array of objects.
1226static void EmitArrayDelete(CodeGenFunction &CGF,
1227                            const CXXDeleteExpr *E,
1228                            llvm::Value *Ptr,
1229                            QualType ElementType) {
1230  llvm::Value *NumElements = 0;
1231  llvm::Value *AllocatedPtr = 0;
1232  CharUnits CookieSize;
1233  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1234                                      NumElements, AllocatedPtr, CookieSize);
1235
1236  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1237
1238  // Make sure that we call delete even if one of the dtors throws.
1239  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1240  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1241                                           AllocatedPtr, OperatorDelete,
1242                                           NumElements, ElementType,
1243                                           CookieSize);
1244
1245  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1246    if (!RD->hasTrivialDestructor()) {
1247      assert(NumElements && "ReadArrayCookie didn't find element count"
1248                            " for a class with destructor");
1249      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1250    }
1251  }
1252
1253  CGF.PopCleanupBlock();
1254}
1255
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    // Only strip implicit casts whose result is void*, and never a
    // user-defined conversion (that one must actually be evaluated).
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer does nothing, so
  // branch straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // At this point Ptr must point at a single (non-array) element.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  // Dispatch on delete[] vs. scalar delete.
  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1316
/// Emit a 'typeid' expression, producing a pointer to the std::type_info
/// object.  For polymorphic glvalue operands this loads the type_info
/// pointer out of the object's vtable; otherwise it returns the static
/// RTTI descriptor.
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(type): purely static, no evaluation needed.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      // Load the type_info pointer from the vtable slot at index -1
      // (the Itanium ABI places it just before the vtable address point).
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic (or non-class) operand: the result is static.
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1368
/// Emit a dynamic_cast of \p V according to \p DCE.
///
/// Pointer casts may yield null; casts to void* use the offset-to-top
/// slot of the vtable; reference casts call the runtime and throw
/// std::bad_cast on failure.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    // Reference cast: the result is an lvalue, so work with a pointer
    // and throw instead of producing null on failure.
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  // Strip pointer/reference wrappers and qualifiers so the RTTI
  // descriptors below are for the underlying record types.
  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    // dynamic_cast of a null pointer yields a null pointer, without
    // touching the object.
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*)
  if (ToVoid) {
    // Cast to void*: add the "offset to top" stored at vtable slot -2
    // to reach the most-derived object; no runtime call needed.
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = EmitCastToVoidPtr(This);
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast(sub, src_type_info, dst_type_info, hint),
    /// which returns the adjusted pointer or null on failure.
    const llvm::Type *ResultType = Int8PtrTy;
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    if (ThrowOnBad) {
      // Reference cast: a null result means the cast failed, so raise
      // std::bad_cast via the runtime.
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(getLLVMContext());
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    // Merge the null-input path (null result) with the cast result.
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1487