CGExprCXX.cpp revision 8a97005f97a2a93fc2cd942c040668c5d4df7537
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
23RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
24                                          llvm::Value *Callee,
25                                          ReturnValueSlot ReturnValue,
26                                          llvm::Value *This,
27                                          llvm::Value *VTT,
28                                          CallExpr::const_arg_iterator ArgBeg,
29                                          CallExpr::const_arg_iterator ArgEnd) {
30  assert(MD->isInstance() &&
31         "Trying to emit a member call expr on a static method!");
32
33  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
34
35  CallArgList Args;
36
37  // Push the this ptr.
38  Args.push_back(std::make_pair(RValue::get(This),
39                                MD->getThisType(getContext())));
40
41  // If there is a VTT parameter, emit it.
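  // (In the Itanium C++ ABI, base-subobject constructors and destructors of
  // classes with virtual bases take a hidden VTT argument, a pointer into a
  // table of vtable pointers, hence the 'void **' type built below.)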
42  if (VTT) {
43    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44    Args.push_back(std::make_pair(RValue::get(VTT), T));
45  }
46
47  // And the rest of the call args
48  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
49
50  QualType ResultType = FPT->getResultType();
51  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
52                                                 FPT->getExtInfo()),
53                  Callee, ReturnValue, Args, MD);
54}
55
56/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
57/// given expression can be devirtualized.
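/// For example, given 'Derived D; D.f();' where 'f' is virtual, the dynamic
/// type of 'D' is known to be 'Derived', so the call can be emitted as a
/// direct call ('Derived' and 'f' are illustrative names only).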
58static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
59  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
60    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
61      // A variable of record type has a known dynamic type; we can devirtualize.
62      return VD->getType()->isRecordType();
63    }
64
65    return false;
66  }
67
68  // We can always devirtualize calls on temporary object expressions.
69  if (isa<CXXConstructExpr>(Base))
70    return true;
71
72  // And calls on bound temporaries.
73  if (isa<CXXBindTemporaryExpr>(Base))
74    return true;
75
76  // Check if this is a call expr that returns a record type.
77  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
78    return CE->getCallReturnType()->isRecordType();
79
80  // We can't devirtualize the call.
81  return false;
82}
83
84RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
85                                              ReturnValueSlot ReturnValue) {
86  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
87    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
88
89  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
90  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
91
92  CGDebugInfo *DI = getDebugInfo();
93  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
94      && !isa<CallExpr>(ME->getBase())) {
95    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
96    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
97      DI->getOrCreateRecordType(PTy->getPointeeType(),
98                                MD->getParent()->getLocation());
99    }
100  }
101
102  if (MD->isStatic()) {
103    // The method is static, emit it as we would a regular call.
104    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
105    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
106                    ReturnValue, CE->arg_begin(), CE->arg_end());
107  }
108
109  // Compute the object pointer.
110  llvm::Value *This;
111  if (ME->isArrow())
112    This = EmitScalarExpr(ME->getBase());
113  else {
114    LValue BaseLV = EmitLValue(ME->getBase());
115    if (BaseLV.isPropertyRef() || BaseLV.isKVCRef()) {
116      QualType QT = ME->getBase()->getType();
117      RValue RV =
118        BaseLV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(BaseLV, QT)
119          : EmitLoadOfKVCRefLValue(BaseLV, QT);
120      This = RV.isScalar() ? RV.getScalarVal() : RV.getAggregateAddr();
121    }
122    else
123      This = BaseLV.getAddress();
124  }
125
126  if (MD->isTrivial()) {
127    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
128
129    assert(MD->isCopyAssignmentOperator() && "unknown trivial member function");
130    // We don't like to generate the trivial copy assignment operator when
131    // it isn't necessary; just produce the proper effect here.
132    llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
133    EmitAggregateCopy(This, RHS, CE->getType());
134    return RValue::get(This);
135  }
136
137  // Compute the function type we're calling.
138  const CGFunctionInfo &FInfo =
139    (isa<CXXDestructorDecl>(MD)
140     ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
141                                      Dtor_Complete)
142     : CGM.getTypes().getFunctionInfo(MD));
143
144  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
145  const llvm::Type *Ty
146    = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
147
148  // C++ [class.virtual]p12:
149  //   Explicit qualification with the scope operator (5.1) suppresses the
150  //   virtual call mechanism.
151  //
152  // We also don't emit a virtual call if the base expression has a record type
153  // because then we know what the type is.
154  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
155                     && !canDevirtualizeMemberFunctionCalls(ME->getBase());
156
157  llvm::Value *Callee;
158  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
159    if (UseVirtualCall) {
160      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
161    } else {
162      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
163    }
164  } else if (UseVirtualCall) {
165    Callee = BuildVirtualCall(MD, This, Ty);
166  } else {
167    Callee = CGM.GetAddrOfFunction(MD, Ty);
168  }
169
170  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
171                           CE->arg_begin(), CE->arg_end());
172}
173
174RValue
175CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
176                                              ReturnValueSlot ReturnValue) {
177  const BinaryOperator *BO =
178      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
179  const Expr *BaseExpr = BO->getLHS();
180  const Expr *MemFnExpr = BO->getRHS();
181
182  const MemberPointerType *MPT =
183    MemFnExpr->getType()->getAs<MemberPointerType>();
184
185  const FunctionProtoType *FPT =
186    MPT->getPointeeType()->getAs<FunctionProtoType>();
187  const CXXRecordDecl *RD =
188    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
189
190  // Get the member function pointer.
191  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
192
193  // Emit the 'this' pointer.
194  llvm::Value *This;
195
196  if (BO->getOpcode() == BO_PtrMemI)
197    This = EmitScalarExpr(BaseExpr);
198  else
199    This = EmitLValue(BaseExpr).getAddress();
200
201  // Ask the ABI to load the callee.  Note that This is modified.
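  // (Under the common Itanium C++ ABI, a member function pointer is a
  // {pointer, adjustment} pair: the adjustment is added to 'This', and the
  // low bit of the pointer distinguishes an ordinary function address from
  // a vtable offset used for virtual member functions.)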
202  llvm::Value *Callee =
203    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
204
205  CallArgList Args;
206
207  QualType ThisType =
208    getContext().getPointerType(getContext().getTagDeclType(RD));
209
210  // Push the this ptr.
211  Args.push_back(std::make_pair(RValue::get(This), ThisType));
212
213  // And the rest of the call args
214  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
215  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
216  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
217                  ReturnValue, Args);
218}
219
220RValue
221CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
222                                               const CXXMethodDecl *MD,
223                                               ReturnValueSlot ReturnValue) {
224  assert(MD->isInstance() &&
225         "Trying to emit a member call expr on a static method!");
226  if (MD->isCopyAssignmentOperator()) {
227    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
228    if (ClassDecl->hasTrivialCopyAssignment()) {
229      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
230             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
231      LValue LV = EmitLValue(E->getArg(0));
232      llvm::Value *This;
233      if (LV.isPropertyRef() || LV.isKVCRef()) {
234        AggValueSlot Slot = CreateAggTemp(E->getArg(1)->getType());
235        EmitAggExpr(E->getArg(1), Slot);
236        if (LV.isPropertyRef())
237          EmitObjCPropertySet(LV.getPropertyRefExpr(), Slot.asRValue());
238        else
239          EmitObjCPropertySet(LV.getKVCRefExpr(), Slot.asRValue());
240        return RValue::getAggregate(0, false);
241      }
242      else
243        This = LV.getAddress();
244
245      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
246      QualType Ty = E->getType();
247      EmitAggregateCopy(This, Src, Ty);
248      return RValue::get(This);
249    }
250  }
251
252  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
253  const llvm::Type *Ty =
254    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
255                                   FPT->isVariadic());
256  LValue LV = EmitLValue(E->getArg(0));
257  llvm::Value *This;
258  if (LV.isPropertyRef() || LV.isKVCRef()) {
259    QualType QT = E->getArg(0)->getType();
260    RValue RV =
261      LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
262                         : EmitLoadOfKVCRefLValue(LV, QT);
263    assert(!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
264    This = RV.getAggregateAddr();
265  }
266  else
267    This = LV.getAddress();
268
269  llvm::Value *Callee;
270  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
271    Callee = BuildVirtualCall(MD, This, Ty);
272  else
273    Callee = CGM.GetAddrOfFunction(MD, Ty);
274
275  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
276                           E->arg_begin() + 1, E->arg_end());
277}
278
279void
280CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
281                                      AggValueSlot Dest) {
282  assert(!Dest.isIgnored() && "Must have a destination!");
283  const CXXConstructorDecl *CD = E->getConstructor();
284
285  // If we require zero initialization before (or instead of) calling the
286  // constructor, as can be the case with a non-user-provided default
287  // constructor, emit the zero initialization now.
288  if (E->requiresZeroInitialization())
289    EmitNullInitialization(Dest.getAddr(), E->getType());
290
291  // If this is a call to a trivial default constructor, do nothing.
292  if (CD->isTrivial() && CD->isDefaultConstructor())
293    return;
294
295  // Elide the constructor if we're constructing from a temporary.
296  // The temporary check is required because Sema sets this on NRVO
297  // returns.
298  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
299    assert(getContext().hasSameUnqualifiedType(E->getType(),
300                                               E->getArg(0)->getType()));
301    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
302      EmitAggExpr(E->getArg(0), Dest);
303      return;
304    }
305  }
306
307  const ConstantArrayType *Array
308    = getContext().getAsConstantArrayType(E->getType());
309  if (Array) {
310    QualType BaseElementTy = getContext().getBaseElementType(Array);
311    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
312    BasePtr = llvm::PointerType::getUnqual(BasePtr);
313    llvm::Value *BaseAddrPtr =
314      Builder.CreateBitCast(Dest.getAddr(), BasePtr);
315
316    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
317                               E->arg_begin(), E->arg_end());
318  }
319  else {
320    CXXCtorType Type =
321      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
322      ? Ctor_Complete : Ctor_Base;
323    bool ForVirtualBase =
324      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
325
326    // Call the constructor.
327    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
328                           E->arg_begin(), E->arg_end());
329  }
330}
331
332/// Check whether the given operator new[] is the global placement
333/// operator new[].
334static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
335                                        const FunctionDecl *Fn) {
336  // Must be in global scope.  Note that allocation functions can't be
337  // declared in namespaces.
338  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
339    return false;
340
341  // Signature must be void *operator new[](size_t, void*).
342  // The size_t is common to all operator new[]s.
343  if (Fn->getNumParams() != 2)
344    return false;
345
346  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
347  return (ParamType == Ctx.VoidPtrTy);
348}
349
350static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
351                                        const CXXNewExpr *E) {
352  if (!E->isArray())
353    return CharUnits::Zero();
354
355  // No cookie is required if the new operator being used is
356  // ::operator new[](size_t, void*).
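  // (An array cookie is extra space placed before the array elements in
  // which the element count is stored, so that the matching 'delete[]' can
  // run the right number of destructors; its size and layout are ABI-specific.)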
357  const FunctionDecl *OperatorNew = E->getOperatorNew();
358  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
359    return CharUnits::Zero();
360
361  return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
362}
363
364static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
365                                        CodeGenFunction &CGF,
366                                        const CXXNewExpr *E,
367                                        llvm::Value *&NumElements,
368                                        llvm::Value *&SizeWithoutCookie) {
369  QualType ElemType = E->getAllocatedType();
370
371  const llvm::IntegerType *SizeTy =
372    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
373
374  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
375
376  if (!E->isArray()) {
377    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
378    return SizeWithoutCookie;
379  }
380
381  // Figure out the cookie size.
382  CharUnits CookieSize = CalculateCookiePadding(CGF, E);
383
384  // Emit the array size expression.
385  // We multiply the size of all dimensions for NumElements.
386  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
387  NumElements = CGF.EmitScalarExpr(E->getArraySize());
388  assert(NumElements->getType() == SizeTy && "element count not a size_t");
389
390  uint64_t ArraySizeMultiplier = 1;
391  while (const ConstantArrayType *CAT
392             = CGF.getContext().getAsConstantArrayType(ElemType)) {
393    ElemType = CAT->getElementType();
394    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
395  }
396
397  llvm::Value *Size;
398
399  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
400  // Don't bloat the -O0 code.
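  // (The product is computed below in an APInt of twice the width of size_t,
  // so a constant overflow is detected exactly; e.g. 'new int[0x40000000]' on
  // a 32-bit target needs 0x100000000 bytes, which does not fit in 32 bits.)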
401  if (llvm::ConstantInt *NumElementsC =
402        dyn_cast<llvm::ConstantInt>(NumElements)) {
403    llvm::APInt NEC = NumElementsC->getValue();
404    unsigned SizeWidth = NEC.getBitWidth();
405
406    // Determine if there is an overflow here by doing an extended multiply.
407    NEC.zext(SizeWidth*2);
408    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
409    SC *= NEC;
410
411    if (!CookieSize.isZero()) {
412      // Save the current size without a cookie.  We don't care if an
413      // overflow's already happened because SizeWithoutCookie isn't
414      // used if the allocator returns null or throws, as it should
415      // always do on an overflow.
416      llvm::APInt SWC = SC;
417      SWC.trunc(SizeWidth);
418      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
419
420      // Add the cookie size.
421      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
422    }
423
424    if (SC.countLeadingZeros() >= SizeWidth) {
425      SC.trunc(SizeWidth);
426      Size = llvm::ConstantInt::get(SizeTy, SC);
427    } else {
428      // On overflow, produce a -1 so operator new throws.
429      Size = llvm::Constant::getAllOnesValue(SizeTy);
430    }
431
432    // Scale NumElements while we're at it.
433    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
434    NumElements = llvm::ConstantInt::get(SizeTy, N);
435
436  // Otherwise, we don't need to do an overflow-checked multiplication if
437  // we're multiplying by one.
438  } else if (TypeSize.isOne()) {
439    assert(ArraySizeMultiplier == 1);
440
441    Size = NumElements;
442
443    // If we need a cookie, add its size in with an overflow check.
444    // This is maybe a little paranoid.
445    if (!CookieSize.isZero()) {
446      SizeWithoutCookie = Size;
447
448      llvm::Value *CookieSizeV
449        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
450
451      const llvm::Type *Types[] = { SizeTy };
452      llvm::Value *UAddF
453        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
454      llvm::Value *AddRes
455        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
456
457      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
458      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
459      Size = CGF.Builder.CreateSelect(DidOverflow,
460                                      llvm::ConstantInt::get(SizeTy, -1),
461                                      Size);
462    }
463
464  // Otherwise use the int.umul.with.overflow intrinsic.
465  } else {
466    llvm::Value *OutermostElementSize
467      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
468
469    llvm::Value *NumOutermostElements = NumElements;
470
471    // Scale NumElements by the array size multiplier.  This might
472    // overflow, but only if the multiplication below also overflows,
473    // in which case this multiplication isn't used.
474    if (ArraySizeMultiplier != 1)
475      NumElements = CGF.Builder.CreateMul(NumElements,
476                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
477
478    // The requested size of the outermost array is non-constant.
479    // Multiply that by the static size of the elements of that array;
480    // on unsigned overflow, set the size to -1 to trigger an
481    // exception from the allocation routine.  This is sufficient to
482    // prevent buffer overruns from the allocator returning a
483    // seemingly valid pointer to insufficient space.  This idea comes
484    // originally from MSVC, and GCC has an open bug requesting
485    // similar behavior:
486    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
487    //
488    // This will not be sufficient for C++0x, which requires a
489    // specific exception class (std::bad_array_new_length).
490    // That will require ABI support that has not yet been specified.
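    // Roughly, for 'new T[n]' with a 4-byte T on a 64-bit target this emits:
    //   %res  = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %n, i64 4)
    //   %size = extractvalue { i64, i1 } %res, 0
    //   %ovf  = extractvalue { i64, i1 } %res, 1
    // and later selects -1 as the allocation size if %ovf is set.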
491    const llvm::Type *Types[] = { SizeTy };
492    llvm::Value *UMulF
493      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
494    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
495                                                  OutermostElementSize);
496
497    // The overflow bit.
498    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
499
500    // The result of the multiplication.
501    Size = CGF.Builder.CreateExtractValue(MulRes, 0);
502
503    // If we have a cookie, we need to add that size in, too.
504    if (!CookieSize.isZero()) {
505      SizeWithoutCookie = Size;
506
507      llvm::Value *CookieSizeV
508        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
509      llvm::Value *UAddF
510        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
511      llvm::Value *AddRes
512        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
513
514      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
515
516      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
517      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
518    }
519
520    Size = CGF.Builder.CreateSelect(DidOverflow,
521                                    llvm::ConstantInt::get(SizeTy, -1),
522                                    Size);
523  }
524
525  if (CookieSize.isZero())
526    SizeWithoutCookie = Size;
527  else
528    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
529
530  return Size;
531}
532
533static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
534                                    llvm::Value *NewPtr) {
535
536  assert(E->getNumConstructorArgs() == 1 &&
537         "Can only have one argument to initializer of POD type.");
538
539  const Expr *Init = E->getConstructorArg(0);
540  QualType AllocType = E->getAllocatedType();
541
542  unsigned Alignment =
543    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
544  if (!CGF.hasAggregateLLVMType(AllocType))
545    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
546                          AllocType.isVolatileQualified(), Alignment,
547                          AllocType);
548  else if (AllocType->isAnyComplexType())
549    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
550                                AllocType.isVolatileQualified());
551  else {
552    AggValueSlot Slot
553      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
554    CGF.EmitAggExpr(Init, Slot);
555  }
556}
557
558void
559CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
560                                         llvm::Value *NewPtr,
561                                         llvm::Value *NumElements) {
562  // We have a POD type.
563  if (E->getNumConstructorArgs() == 0)
564    return;
565
566  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
567
568  // Create a temporary for the loop index and initialize it with 0.
569  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
570  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
571  Builder.CreateStore(Zero, IndexPtr);
572
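  // The emitted code is a simple index loop, roughly:
  //   for (size_t i = 0; i < NumElements; ++i)
  //     initialize NewPtr[i] from the single initializer expression;
  // built from the for.cond, for.body, for.inc and for.end blocks below.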
573  // Start the loop with a block that tests the condition.
574  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
575  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
576
577  EmitBlock(CondBlock);
578
579  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
580
581  // Generate: if (loop-index < number-of-elements) fall to the loop body,
582  // otherwise, go to the block after the for-loop.
583  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
584  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
585  // If the condition is true, execute the body.
586  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
587
588  EmitBlock(ForBody);
589
590  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
591  // Inside the loop body, emit the constructor call on the array element.
592  Counter = Builder.CreateLoad(IndexPtr);
593  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
594                                                   "arrayidx");
595  StoreAnyExprIntoOneUnit(*this, E, Address);
596
597  EmitBlock(ContinueBlock);
598
599  // Emit the increment of the loop counter.
600  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
601  Counter = Builder.CreateLoad(IndexPtr);
602  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
603  Builder.CreateStore(NextVal, IndexPtr);
604
605  // Finally, branch back up to the condition for the next iteration.
606  EmitBranch(CondBlock);
607
608  // Emit the fall-through block.
609  EmitBlock(AfterFor, true);
610}
611
612static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
613                           llvm::Value *NewPtr, llvm::Value *Size) {
614  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
615  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
616  if (NewPtr->getType() != BP)
617    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
618
619  CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
620                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
621                          Size,
622                    llvm::ConstantInt::get(CGF.Int32Ty,
623                                           CGF.getContext().getTypeAlign(T)/8),
624                          llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
625                                                 0));
626}
627
628static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
629                               llvm::Value *NewPtr,
630                               llvm::Value *NumElements,
631                               llvm::Value *AllocSizeWithoutCookie) {
632  if (E->isArray()) {
633    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
634      bool RequiresZeroInitialization = false;
635      if (Ctor->getParent()->hasTrivialConstructor()) {
636        // If new expression did not specify value-initialization, then there
637        // is no initialization.
638        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
639          return;
640
641        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
642          // Optimization: since zero initialization will just set the memory
643          // to all zeroes, generate a single memset to do it in one shot.
644          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
645                         AllocSizeWithoutCookie);
646          return;
647        }
648
649        RequiresZeroInitialization = true;
650      }
651
652      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
653                                     E->constructor_arg_begin(),
654                                     E->constructor_arg_end(),
655                                     RequiresZeroInitialization);
656      return;
657    } else if (E->getNumConstructorArgs() == 1 &&
658               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
659      // Optimization: since zero initialization will just set the memory
660      // to all zeroes, generate a single memset to do it in one shot.
661      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
662                     AllocSizeWithoutCookie);
663      return;
664    } else {
665      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
666      return;
667    }
668  }
669
670  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
671    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
672    // direct initialization. C++ [dcl.init]p5 requires that we
673    // zero-initialize storage if there are no user-declared constructors.
674    if (E->hasInitializer() &&
675        !Ctor->getParent()->hasUserDeclaredConstructor() &&
676        !Ctor->getParent()->isEmpty())
677      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
678
679    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
680                               NewPtr, E->constructor_arg_begin(),
681                               E->constructor_arg_end());
682
683    return;
684  }
685  // We have a POD type.
686  if (E->getNumConstructorArgs() == 0)
687    return;
688
689  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
690}
691
692namespace {
693/// A utility class for saving an rvalue.
694class SavedRValue {
695public:
696  enum Kind { ScalarLiteral, ScalarAddress,
697              AggregateLiteral, AggregateAddress,
698              Complex };
699
700private:
701  llvm::Value *Value;
702  Kind K;
703
704  SavedRValue(llvm::Value *V, Kind K) : Value(V), K(K) {}
705
706public:
707  SavedRValue() {}
708
709  static SavedRValue forScalarLiteral(llvm::Value *V) {
710    return SavedRValue(V, ScalarLiteral);
711  }
712
713  static SavedRValue forScalarAddress(llvm::Value *Addr) {
714    return SavedRValue(Addr, ScalarAddress);
715  }
716
717  static SavedRValue forAggregateLiteral(llvm::Value *V) {
718    return SavedRValue(V, AggregateLiteral);
719  }
720
721  static SavedRValue forAggregateAddress(llvm::Value *Addr) {
722    return SavedRValue(Addr, AggregateAddress);
723  }
724
725  static SavedRValue forComplexAddress(llvm::Value *Addr) {
726    return SavedRValue(Addr, Complex);
727  }
728
729  Kind getKind() const { return K; }
730  llvm::Value *getValue() const { return Value; }
731};
732} // end anonymous namespace
733
734/// Given an r-value, emit the code necessary to make sure that a
735/// future RestoreRValue will be able to load the value without
736/// domination concerns.
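/// For example, a placement argument evaluated in only one arm of a
/// conditional expression would not dominate the 'operator delete' cleanup
/// that may use it later, so anything that is not a constant or an alloca
/// is spilled to a temporary alloca here and reloaded by RestoreRValue.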
737static SavedRValue SaveRValue(CodeGenFunction &CGF, RValue RV) {
738  if (RV.isScalar()) {
739    llvm::Value *V = RV.getScalarVal();
740
741    // These automatically dominate and don't need to be saved.
742    if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
743      return SavedRValue::forScalarLiteral(V);
744
745    // Everything else needs an alloca.
746    llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
747    CGF.Builder.CreateStore(V, Addr);
748    return SavedRValue::forScalarAddress(Addr);
749  }
750
751  if (RV.isComplex()) {
752    CodeGenFunction::ComplexPairTy V = RV.getComplexVal();
753    const llvm::Type *ComplexTy =
754      llvm::StructType::get(CGF.getLLVMContext(),
755                            V.first->getType(), V.second->getType(),
756                            (void*) 0);
757    llvm::Value *Addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
758    CGF.StoreComplexToAddr(V, Addr, /*volatile*/ false);
759    return SavedRValue::forComplexAddress(Addr);
760  }
761
762  assert(RV.isAggregate());
763  llvm::Value *V = RV.getAggregateAddr(); // TODO: volatile?
764  if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
765    return SavedRValue::forAggregateLiteral(V);
766
767  llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
768  CGF.Builder.CreateStore(V, Addr);
769  return SavedRValue::forAggregateAddress(Addr);
770}
771
772/// Given a saved r-value produced by SaveRValue, emit the code
773/// necessary to restore it to usability at the current insertion
774/// point.
775static RValue RestoreRValue(CodeGenFunction &CGF, SavedRValue RV) {
776  switch (RV.getKind()) {
777  case SavedRValue::ScalarLiteral:
778    return RValue::get(RV.getValue());
779  case SavedRValue::ScalarAddress:
780    return RValue::get(CGF.Builder.CreateLoad(RV.getValue()));
781  case SavedRValue::AggregateLiteral:
782    return RValue::getAggregate(RV.getValue());
783  case SavedRValue::AggregateAddress:
784    return RValue::getAggregate(CGF.Builder.CreateLoad(RV.getValue()));
785  case SavedRValue::Complex:
786    return RValue::getComplex(CGF.LoadComplexFromAddr(RV.getValue(), false));
787  }
788
789  llvm_unreachable("bad saved r-value kind");
790  return RValue();
791}
792
793namespace {
794  /// A cleanup to call the given 'operator delete' function upon
795  /// abnormal exit from a new expression.
796  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
797    size_t NumPlacementArgs;
798    const FunctionDecl *OperatorDelete;
799    llvm::Value *Ptr;
800    llvm::Value *AllocSize;
801
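    // The placement arguments are stored in variable-sized trailing storage
    // directly after this object; pushCleanupWithExtra reserves
    // getExtraSize() extra bytes on the EH stack for them.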
802    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
803
804  public:
805    static size_t getExtraSize(size_t NumPlacementArgs) {
806      return NumPlacementArgs * sizeof(RValue);
807    }
808
809    CallDeleteDuringNew(size_t NumPlacementArgs,
810                        const FunctionDecl *OperatorDelete,
811                        llvm::Value *Ptr,
812                        llvm::Value *AllocSize)
813      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
814        Ptr(Ptr), AllocSize(AllocSize) {}
815
816    void setPlacementArg(unsigned I, RValue Arg) {
817      assert(I < NumPlacementArgs && "index out of range");
818      getPlacementArgs()[I] = Arg;
819    }
820
821    void Emit(CodeGenFunction &CGF, bool IsForEH) {
822      const FunctionProtoType *FPT
823        = OperatorDelete->getType()->getAs<FunctionProtoType>();
824      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
825             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
826
827      CallArgList DeleteArgs;
828
829      // The first argument is always a void*.
830      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
831      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
832
833      // A member 'operator delete' can take an extra 'size_t' argument.
834      if (FPT->getNumArgs() == NumPlacementArgs + 2)
835        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
836
837      // Pass the rest of the arguments, which must match exactly.
838      for (unsigned I = 0; I != NumPlacementArgs; ++I)
839        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
840
841      // Call 'operator delete'.
842      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
843                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
844                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
845    }
846  };
847
848  /// A cleanup to call the given 'operator delete' function upon
849  /// abnormal exit from a new expression when the new expression is
850  /// conditional.
851  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
852    size_t NumPlacementArgs;
853    const FunctionDecl *OperatorDelete;
854    SavedRValue Ptr;
855    SavedRValue AllocSize;
856
857    SavedRValue *getPlacementArgs() {
858      return reinterpret_cast<SavedRValue*>(this+1);
859    }
860
861  public:
862    static size_t getExtraSize(size_t NumPlacementArgs) {
863      return NumPlacementArgs * sizeof(SavedRValue);
864    }
865
866    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
867                                   const FunctionDecl *OperatorDelete,
868                                   SavedRValue Ptr,
869                                   SavedRValue AllocSize)
870      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
871        Ptr(Ptr), AllocSize(AllocSize) {}
872
873    void setPlacementArg(unsigned I, SavedRValue Arg) {
874      assert(I < NumPlacementArgs && "index out of range");
875      getPlacementArgs()[I] = Arg;
876    }
877
878    void Emit(CodeGenFunction &CGF, bool IsForEH) {
879      const FunctionProtoType *FPT
880        = OperatorDelete->getType()->getAs<FunctionProtoType>();
881      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
882             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
883
884      CallArgList DeleteArgs;
885
886      // The first argument is always a void*.
887      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
888      DeleteArgs.push_back(std::make_pair(RestoreRValue(CGF, Ptr), *AI++));
889
890      // A member 'operator delete' can take an extra 'size_t' argument.
891      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
892        RValue RV = RestoreRValue(CGF, AllocSize);
893        DeleteArgs.push_back(std::make_pair(RV, *AI++));
894      }
895
896      // Pass the rest of the arguments, which must match exactly.
897      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
898        RValue RV = RestoreRValue(CGF, getPlacementArgs()[I]);
899        DeleteArgs.push_back(std::make_pair(RV, *AI++));
900      }
901
902      // Call 'operator delete'.
903      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
904                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
905                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
906    }
907  };
908}
909
910/// Enter a cleanup to call 'operator delete' if the initializer in a
911/// new-expression throws.
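/// When the new-expression is emitted inside a conditional branch, the
/// cleanup is pushed in an inactive state and only activated on the path
/// where the allocation actually happened; it is deactivated again once
/// initialization completes (see DeactivateCleanupBlock in EmitCXXNewExpr).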
912static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
913                                  const CXXNewExpr *E,
914                                  llvm::Value *NewPtr,
915                                  llvm::Value *AllocSize,
916                                  const CallArgList &NewArgs) {
917  // If we're not inside a conditional branch, then the cleanup will
918  // dominate and we can do the easier (and more efficient) thing.
919  if (!CGF.isInConditionalBranch()) {
920    CallDeleteDuringNew *Cleanup = CGF.EHStack
921      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
922                                                 E->getNumPlacementArgs(),
923                                                 E->getOperatorDelete(),
924                                                 NewPtr, AllocSize);
925    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
926      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
927
928    return;
929  }
930
931  // Otherwise, we need to save all this stuff.
932  SavedRValue SavedNewPtr = SaveRValue(CGF, RValue::get(NewPtr));
933  SavedRValue SavedAllocSize = SaveRValue(CGF, RValue::get(AllocSize));
934
935  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
936    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
937                                                 E->getNumPlacementArgs(),
938                                                 E->getOperatorDelete(),
939                                                 SavedNewPtr,
940                                                 SavedAllocSize);
941  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
942    Cleanup->setPlacementArg(I, SaveRValue(CGF, NewArgs[I+1].first));
943
944  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
945}
946
947llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
948  QualType AllocType = E->getAllocatedType();
949  if (AllocType->isArrayType())
950    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
951      AllocType = AType->getElementType();
952
953  FunctionDecl *NewFD = E->getOperatorNew();
954  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
955
956  CallArgList NewArgs;
957
958  // The allocation size is the first argument.
959  QualType SizeTy = getContext().getSizeType();
960
961  llvm::Value *NumElements = 0;
962  llvm::Value *AllocSizeWithoutCookie = 0;
963  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
964                                               *this, E, NumElements,
965                                               AllocSizeWithoutCookie);
966
967  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
968
969  // Emit the rest of the arguments.
970  // FIXME: Ideally, this should just use EmitCallArgs.
971  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
972
973  // First, use the types from the function type.
974  // We start at 1 here because the first argument (the allocation size)
975  // has already been emitted.
976  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
977    QualType ArgType = NewFTy->getArgType(i);
978
979    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
980           getTypePtr() ==
981           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
982           "type mismatch in call argument!");
983
984    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
985                                     ArgType));
986
987  }
988
989  // Either we've emitted all the call args, or we have a call to a
990  // variadic function.
991  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
992         "Extra arguments in non-variadic function!");
993
994  // If we still have any arguments, emit them using the type of the argument.
995  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
996       NewArg != NewArgEnd; ++NewArg) {
997    QualType ArgType = NewArg->getType();
998    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
999                                     ArgType));
1000  }
1001
1002  // Emit the call to new.
1003  RValue RV =
1004    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
1005             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
1006
1007  // If an allocation function is declared with an empty exception specification,
1008  // it returns null to indicate failure to allocate storage. [expr.new]p13.
1009  // (We don't need to check for null when there's no new initializer and
1010  // we're allocating a POD type).
1011  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
1012    !(AllocType->isPODType() && !E->hasInitializer());
1013
1014  llvm::BasicBlock *NullCheckSource = 0;
1015  llvm::BasicBlock *NewNotNull = 0;
1016  llvm::BasicBlock *NewEnd = 0;
1017
1018  llvm::Value *NewPtr = RV.getScalarVal();
1019  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
1020
1021  if (NullCheckResult) {
1022    NullCheckSource = Builder.GetInsertBlock();
1023    NewNotNull = createBasicBlock("new.notnull");
1024    NewEnd = createBasicBlock("new.end");
1025
1026    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
1027    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
1028    EmitBlock(NewNotNull);
1029  }
1030
1031  assert((AllocSize == AllocSizeWithoutCookie) ==
1032         CalculateCookiePadding(*this, E).isZero());
1033  if (AllocSize != AllocSizeWithoutCookie) {
1034    assert(E->isArray());
1035    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
1036                                                   AllocType);
1037  }
1038
1039  // If there's an operator delete, enter a cleanup to call it if an
1040  // exception is thrown.
1041  EHScopeStack::stable_iterator CallOperatorDelete;
1042  if (E->getOperatorDelete()) {
1043    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
1044    CallOperatorDelete = EHStack.stable_begin();
1045  }
1046
1047  const llvm::Type *ElementPtrTy
1048    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
1049  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
1050
1051  if (E->isArray()) {
1052    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1053
1054    // NewPtr is a pointer to the base element type.  If we're
1055    // allocating an array of arrays, we'll need to cast back to the
1056    // array pointer type.
1057    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
1058    if (NewPtr->getType() != ResultTy)
1059      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
1060  } else {
1061    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1062  }
1063
1064  // Deactivate the 'operator delete' cleanup if we finished
1065  // initialization.
1066  if (CallOperatorDelete.isValid())
1067    DeactivateCleanupBlock(CallOperatorDelete);
1068
1069  if (NullCheckResult) {
1070    Builder.CreateBr(NewEnd);
1071    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
1072    EmitBlock(NewEnd);
1073
1074    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
1075    PHI->reserveOperandSpace(2);
1076    PHI->addIncoming(NewPtr, NotNullSource);
1077    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
1078                     NullCheckSource);
1079
1080    NewPtr = PHI;
1081  }
1082
1083  return NewPtr;
1084}
1085
1086void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1087                                     llvm::Value *Ptr,
1088                                     QualType DeleteTy) {
1089  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1090
1091  const FunctionProtoType *DeleteFTy =
1092    DeleteFD->getType()->getAs<FunctionProtoType>();
1093
1094  CallArgList DeleteArgs;
1095
1096  // Check if we need to pass the size to the delete operator.
1097  llvm::Value *Size = 0;
1098  QualType SizeTy;
1099  if (DeleteFTy->getNumArgs() == 2) {
1100    SizeTy = DeleteFTy->getArgType(1);
1101    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1102    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1103                                  DeleteTypeSize.getQuantity());
1104  }
1105
1106  QualType ArgTy = DeleteFTy->getArgType(0);
1107  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1108  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1109
1110  if (Size)
1111    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1112
1113  // Emit the call to delete.
1114  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1115           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1116           DeleteArgs, DeleteFD);
1117}
1118
1119namespace {
1120  /// Calls the given 'operator delete' on a single object.
1121  struct CallObjectDelete : EHScopeStack::Cleanup {
1122    llvm::Value *Ptr;
1123    const FunctionDecl *OperatorDelete;
1124    QualType ElementType;
1125
1126    CallObjectDelete(llvm::Value *Ptr,
1127                     const FunctionDecl *OperatorDelete,
1128                     QualType ElementType)
1129      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1130
1131    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1132      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1133    }
1134  };
1135}
1136
1137/// Emit the code for deleting a single object.
1138static void EmitObjectDelete(CodeGenFunction &CGF,
1139                             const FunctionDecl *OperatorDelete,
1140                             llvm::Value *Ptr,
1141                             QualType ElementType) {
1142  // Find the destructor for the type, if applicable.  If the
1143  // destructor is virtual, we'll just emit the vcall and return.
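  // (When the destructor is virtual, the 'deleting' destructor variant is
  // invoked through the vtable; per the Itanium C++ ABI it calls the right
  // operator delete itself, so no explicit delete call is emitted on that
  // path.)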
1144  const CXXDestructorDecl *Dtor = 0;
1145  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1146    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1147    if (!RD->hasTrivialDestructor()) {
1148      Dtor = RD->getDestructor();
1149
1150      if (Dtor->isVirtual()) {
1151        const llvm::Type *Ty =
1152          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
1153                                                               Dtor_Complete),
1154                                         /*isVariadic=*/false);
1155
1156        llvm::Value *Callee
1157          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
1158        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
1159                              0, 0);
1160
1161        // The dtor took care of deleting the object.
1162        return;
1163      }
1164    }
1165  }
1166
1167  // Make sure that we call delete even if the dtor throws.
1168  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1169                                            Ptr, OperatorDelete, ElementType);
1170
1171  if (Dtor)
1172    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1173                              /*ForVirtualBase=*/false, Ptr);
1174
1175  CGF.PopCleanupBlock();
1176}
1177
1178namespace {
1179  /// Calls the given 'operator delete' on an array of objects.
1180  struct CallArrayDelete : EHScopeStack::Cleanup {
1181    llvm::Value *Ptr;
1182    const FunctionDecl *OperatorDelete;
1183    llvm::Value *NumElements;
1184    QualType ElementType;
1185    CharUnits CookieSize;
1186
1187    CallArrayDelete(llvm::Value *Ptr,
1188                    const FunctionDecl *OperatorDelete,
1189                    llvm::Value *NumElements,
1190                    QualType ElementType,
1191                    CharUnits CookieSize)
1192      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1193        ElementType(ElementType), CookieSize(CookieSize) {}
1194
1195    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1196      const FunctionProtoType *DeleteFTy =
1197        OperatorDelete->getType()->getAs<FunctionProtoType>();
1198      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1199
1200      CallArgList Args;
1201
1202      // Pass the pointer as the first argument.
1203      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1204      llvm::Value *DeletePtr
1205        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1206      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1207
1208      // Pass the original requested size as the second argument.
1209      if (DeleteFTy->getNumArgs() == 2) {
1210        QualType size_t = DeleteFTy->getArgType(1);
1211        const llvm::IntegerType *SizeTy
1212          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1213
1214        CharUnits ElementTypeSize =
1215          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1216
1217        // The size of an element, multiplied by the number of elements.
1218        llvm::Value *Size
1219          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1220        Size = CGF.Builder.CreateMul(Size, NumElements);
1221
1222        // Plus the size of the cookie if applicable.
1223        if (!CookieSize.isZero()) {
1224          llvm::Value *CookieSizeV
1225            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1226          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1227        }
1228
1229        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1230      }
1231
1232      // Emit the call to delete.
1233      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1234                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1235                   ReturnValueSlot(), Args, OperatorDelete);
1236    }
1237  };
1238}
1239
1240/// Emit the code for deleting an array of objects.
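/// The ABI-specific array cookie, if one was written by the corresponding
/// new-expression, is read back to recover both the pointer originally
/// returned by the allocator and the element count needed to run the
/// element destructors before 'operator delete[]' is called.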
1241static void EmitArrayDelete(CodeGenFunction &CGF,
1242                            const FunctionDecl *OperatorDelete,
1243                            llvm::Value *Ptr,
1244                            QualType ElementType) {
1245  llvm::Value *NumElements = 0;
1246  llvm::Value *AllocatedPtr = 0;
1247  CharUnits CookieSize;
1248  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
1249                                      NumElements, AllocatedPtr, CookieSize);
1250
1251  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1252
1253  // Make sure that we call delete even if one of the dtors throws.
1254  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1255                                           AllocatedPtr, OperatorDelete,
1256                                           NumElements, ElementType,
1257                                           CookieSize);
1258
1259  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1260    if (!RD->hasTrivialDestructor()) {
1261      assert(NumElements && "ReadArrayCookie didn't find element count"
1262                            " for a class with destructor");
1263      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1264    }
1265  }
1266
1267  CGF.PopCleanupBlock();
1268}
1269
1270void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1271
1272  // Get at the argument before we performed the implicit conversion
1273  // to void*.
1274  const Expr *Arg = E->getArgument();
1275  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
1276    if (ICE->getCastKind() != CK_UserDefinedConversion &&
1277        ICE->getType()->isVoidPointerType())
1278      Arg = ICE->getSubExpr();
1279    else
1280      break;
1281  }
1282
1283  llvm::Value *Ptr = EmitScalarExpr(Arg);
1284
1285  // Null check the pointer.
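  // (Deleting a null pointer has no effect, so branch straight to delete.end
  // without invoking the destructor or operator delete.)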
1286  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1287  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1288
1289  llvm::Value *IsNull =
1290    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
1291                         "isnull");
1292
1293  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1294  EmitBlock(DeleteNotNull);
1295
1296  // We might be deleting a pointer to array.  If so, GEP down to the
1297  // first non-array element.
1298  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1299  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1300  if (DeleteTy->isConstantArrayType()) {
1301    llvm::Value *Zero = Builder.getInt32(0);
1302    llvm::SmallVector<llvm::Value*,8> GEP;
1303
1304    GEP.push_back(Zero); // point at the outermost array
1305
1306    // For each layer of array type we're pointing at:
1307    while (const ConstantArrayType *Arr
1308             = getContext().getAsConstantArrayType(DeleteTy)) {
1309      // 1. Unpeel the array type.
1310      DeleteTy = Arr->getElementType();
1311
1312      // 2. GEP to the first element of the array.
1313      GEP.push_back(Zero);
1314    }
1315
1316    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
1317  }
1318
1319  assert(ConvertTypeForMem(DeleteTy) ==
1320         cast<llvm::PointerType>(Ptr->getType())->getElementType());
1321
1322  if (E->isArrayForm()) {
1323    EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1324  } else {
1325    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1326  }
1327
1328  EmitBlock(DeleteEnd);
1329}
1330
1331llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1332  QualType Ty = E->getType();
1333  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
1334
1335  if (E->isTypeOperand()) {
1336    llvm::Constant *TypeInfo =
1337      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1338    return Builder.CreateBitCast(TypeInfo, LTy);
1339  }
1340
1341  Expr *subE = E->getExprOperand();
1342  Ty = subE->getType();
1343  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
1344  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
1345  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1346    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1347    if (RD->isPolymorphic()) {
1348      // FIXME: if subE is an lvalue do
1349      LValue Obj = EmitLValue(subE);
1350      llvm::Value *This = Obj.getAddress();
1351      LTy = LTy->getPointerTo()->getPointerTo();
1352      llvm::Value *V = Builder.CreateBitCast(This, LTy);
1353      // We need to do a zero check for *p, unless it has NonNullAttr.
1354      // FIXME: PointerType->hasAttr<NonNullAttr>()
1355      bool CanBeZero = false;
1356      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
1357        if (UO->getOpcode() == UO_Deref)
1358          CanBeZero = true;
1359      if (CanBeZero) {
1360        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
1361        llvm::BasicBlock *ZeroBlock = createBasicBlock();
1362
1363        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
1364        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
1365                             NonZeroBlock, ZeroBlock);
1366        EmitBlock(ZeroBlock);
1367        /// Call __cxa_bad_typeid
1368        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
1369        const llvm::FunctionType *FTy;
1370        FTy = llvm::FunctionType::get(ResultType, false);
1371        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1372        Builder.CreateCall(F)->setDoesNotReturn();
1373        Builder.CreateUnreachable();
1374        EmitBlock(NonZeroBlock);
1375      }
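      // In the Itanium C++ ABI the std::type_info pointer of a polymorphic
      // class is stored in the vtable slot just before the address point,
      // hence the vtable load followed by the GEP of -1 below.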
1376      V = Builder.CreateLoad(V, "vtable");
1377      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
1378      V = Builder.CreateLoad(V);
1379      return V;
1380    }
1381  }
1382  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
1383}
1384
1385llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
1386                                              const CXXDynamicCastExpr *DCE) {
1387  QualType SrcTy = DCE->getSubExpr()->getType();
1388  QualType DestTy = DCE->getTypeAsWritten();
1389  QualType InnerType = DestTy->getPointeeType();
1390
1391  const llvm::Type *LTy = ConvertType(DCE->getType());
1392
1393  bool CanBeZero = false;
1394  bool ToVoid = false;
1395  bool ThrowOnBad = false;
1396  if (DestTy->isPointerType()) {
1397    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
1398    CanBeZero = true;
1399    if (InnerType->isVoidType())
1400      ToVoid = true;
1401  } else {
1402    LTy = LTy->getPointerTo();
1403
1404    // FIXME: What if exceptions are disabled?
1405    ThrowOnBad = true;
1406  }
1407
1408  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
1409    SrcTy = SrcTy->getPointeeType();
1410  SrcTy = SrcTy.getUnqualifiedType();
1411
1412  if (DestTy->isPointerType() || DestTy->isReferenceType())
1413    DestTy = DestTy->getPointeeType();
1414  DestTy = DestTy.getUnqualifiedType();
1415
1416  llvm::BasicBlock *ContBlock = createBasicBlock();
1417  llvm::BasicBlock *NullBlock = 0;
1418  llvm::BasicBlock *NonZeroBlock = 0;
1419  if (CanBeZero) {
1420    NonZeroBlock = createBasicBlock();
1421    NullBlock = createBasicBlock();
1422    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
1423    EmitBlock(NonZeroBlock);
1424  }
1425
1426  llvm::BasicBlock *BadCastBlock = 0;
1427
1428  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
1429
1430  // See if this is a dynamic_cast(void*)
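  // A dynamic_cast to 'void*' yields a pointer to the most-derived object:
  // in the Itanium C++ ABI the 'offset to top' field at vtable slot -2 gives
  // the displacement from this subobject to the complete object.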
1431  if (ToVoid) {
1432    llvm::Value *This = V;
1433    V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
1434    V = Builder.CreateLoad(V, "vtable");
1435    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
1436    V = Builder.CreateLoad(V, "offset to top");
1437    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
1438    V = Builder.CreateInBoundsGEP(This, V);
1439    V = Builder.CreateBitCast(V, LTy);
1440  } else {
1441    /// Call __dynamic_cast
1442    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
1443    const llvm::FunctionType *FTy;
1444    std::vector<const llvm::Type*> ArgTys;
1445    const llvm::Type *PtrToInt8Ty
1446      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
1447    ArgTys.push_back(PtrToInt8Ty);
1448    ArgTys.push_back(PtrToInt8Ty);
1449    ArgTys.push_back(PtrToInt8Ty);
1450    ArgTys.push_back(PtrDiffTy);
1451    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
1452
1453    // FIXME: Calculate better hint.
1454    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
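    // (Per the Itanium C++ ABI runtime, the trailing argument is a static
    // hint about the offset from the source subobject to the destination
    // type; -1 means no hint is available.)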
1455
1456    assert(SrcTy->isRecordType() && "Src type must be record type!");
1457    assert(DestTy->isRecordType() && "Dest type must be record type!");
1458
1459    llvm::Value *SrcArg
1460      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
1461    llvm::Value *DestArg
1462      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
1463
1464    V = Builder.CreateBitCast(V, PtrToInt8Ty);
1465    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
1466                            V, SrcArg, DestArg, hint);
1467    V = Builder.CreateBitCast(V, LTy);
1468
1469    if (ThrowOnBad) {
1470      BadCastBlock = createBasicBlock();
1471      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
1472      EmitBlock(BadCastBlock);
1473      /// Invoke __cxa_bad_cast
1474      ResultType = llvm::Type::getVoidTy(VMContext);
1475      const llvm::FunctionType *FBadTy;
1476      FBadTy = llvm::FunctionType::get(ResultType, false);
1477      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
1478      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
1479        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1480        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
1481        EmitBlock(Cont);
1482      } else {
1483        // FIXME: Does this ever make sense?
1484        Builder.CreateCall(F)->setDoesNotReturn();
1485      }
1486      Builder.CreateUnreachable();
1487    }
1488  }
1489
1490  if (CanBeZero) {
1491    Builder.CreateBr(ContBlock);
1492    EmitBlock(NullBlock);
1493    Builder.CreateBr(ContBlock);
1494  }
1495  EmitBlock(ContBlock);
1496  if (CanBeZero) {
1497    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
1498    PHI->reserveOperandSpace(2);
1499    PHI->addIncoming(V, NonZeroBlock);
1500    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
1501    V = PHI;
1502  }
1503
1504  return V;
1505}
1506