CGExprCXX.cpp revision c384636f9a405b687990564b204b98e360c81587
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenFunction.h"
15#include "CGCXXABI.h"
16#include "CGObjCRuntime.h"
17#include "llvm/Intrinsics.h"
18using namespace clang;
19using namespace CodeGen;
20
21RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
22                                          llvm::Value *Callee,
23                                          ReturnValueSlot ReturnValue,
24                                          llvm::Value *This,
25                                          llvm::Value *VTT,
26                                          CallExpr::const_arg_iterator ArgBeg,
27                                          CallExpr::const_arg_iterator ArgEnd) {
28  assert(MD->isInstance() &&
29         "Trying to emit a member call expr on a static method!");
30
31  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
32
33  CallArgList Args;
34
35  // Push the this ptr.
36  Args.push_back(std::make_pair(RValue::get(This),
37                                MD->getThisType(getContext())));
38
39  // If there is a VTT parameter, emit it.
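  // (The VTT, or "virtual table table", is a hidden extra argument that the
  //  Itanium C++ ABI passes to base-subobject constructors and destructors of
  //  classes with virtual bases so they can locate construction vtables.)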
40  if (VTT) {
41    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
42    Args.push_back(std::make_pair(RValue::get(VTT), T));
43  }
44
45  // And the rest of the call args
46  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
47
48  QualType ResultType = FPT->getResultType();
49  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
50                                                 FPT->getExtInfo()),
51                  Callee, ReturnValue, Args, MD);
52}
53
54/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
55/// given expression can be devirtualized.
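/// For example, in 'Derived D; D.virtualMethod();' the dynamic type of 'D' is
/// known to be exactly 'Derived', so the call can be emitted as a direct call
/// instead of going through the vtable.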
56static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
57  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
58    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
59      // The variable has a record type, so its dynamic type is known exactly.
60      return VD->getType()->isRecordType();
61    }
62
63    return false;
64  }
65
66  // We can always devirtualize calls on temporary object expressions.
67  if (isa<CXXConstructExpr>(Base))
68    return true;
69
70  // And calls on bound temporaries.
71  if (isa<CXXBindTemporaryExpr>(Base))
72    return true;
73
74  // Check if this is a call expr that returns a record type.
75  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
76    return CE->getCallReturnType()->isRecordType();
77
78  // We can't devirtualize the call.
79  return false;
80}
81
82RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
83                                              ReturnValueSlot ReturnValue) {
84  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
85    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
86
87  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
88  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
89
90  if (MD->isStatic()) {
91    // The method is static, emit it as we would a regular call.
92    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
93    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
94                    ReturnValue, CE->arg_begin(), CE->arg_end());
95  }
96
97  // Compute the object pointer.
98  llvm::Value *This;
99  if (ME->isArrow())
100    This = EmitScalarExpr(ME->getBase());
101  else {
102    LValue BaseLV = EmitLValue(ME->getBase());
103    if (BaseLV.isPropertyRef() || BaseLV.isKVCRef()) {
104      QualType QT = ME->getBase()->getType();
105      RValue RV =
106        BaseLV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(BaseLV, QT)
107          : EmitLoadOfKVCRefLValue(BaseLV, QT);
108      This = RV.isScalar() ? RV.getScalarVal() : RV.getAggregateAddr();
109    }
110    else
111      This = BaseLV.getAddress();
112  }
113
114  if (MD->isTrivial()) {
115    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
116
117    assert(MD->isCopyAssignment() && "unknown trivial member function");
118    // We don't like to generate the trivial copy assignment operator when
119    // it isn't necessary; just produce the proper effect here.
120    llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
121    EmitAggregateCopy(This, RHS, CE->getType());
122    return RValue::get(This);
123  }
124
125  // Compute the function type we're calling.
126  const CGFunctionInfo &FInfo =
127    (isa<CXXDestructorDecl>(MD)
128     ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
129                                      Dtor_Complete)
130     : CGM.getTypes().getFunctionInfo(MD));
131
132  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
133  const llvm::Type *Ty
134    = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
135
136  // C++ [class.virtual]p12:
137  //   Explicit qualification with the scope operator (5.1) suppresses the
138  //   virtual call mechanism.
139  //
140  // We also don't emit a virtual call if the base expression has a record type
141  // because then we know what the type is.
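  // For example, 'x.Base::f()' is dispatched statically even if 'f' is
  // virtual, because the call names the function with a qualifier.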
142  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
143                     && !canDevirtualizeMemberFunctionCalls(ME->getBase());
144
145  llvm::Value *Callee;
146  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
147    if (UseVirtualCall) {
148      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
149    } else {
150      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
151    }
152  } else if (UseVirtualCall) {
153    Callee = BuildVirtualCall(MD, This, Ty);
154  } else {
155    Callee = CGM.GetAddrOfFunction(MD, Ty);
156  }
157
158  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
159                           CE->arg_begin(), CE->arg_end());
160}
161
162RValue
163CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
164                                              ReturnValueSlot ReturnValue) {
165  const BinaryOperator *BO =
166      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
167  const Expr *BaseExpr = BO->getLHS();
168  const Expr *MemFnExpr = BO->getRHS();
169
170  const MemberPointerType *MPT =
171    MemFnExpr->getType()->getAs<MemberPointerType>();
172
173  const FunctionProtoType *FPT =
174    MPT->getPointeeType()->getAs<FunctionProtoType>();
175  const CXXRecordDecl *RD =
176    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
177
178  // Get the member function pointer.
179  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
180
181  // Emit the 'this' pointer.
182  llvm::Value *This;
183
184  if (BO->getOpcode() == BO_PtrMemI)
185    This = EmitScalarExpr(BaseExpr);
186  else
187    This = EmitLValue(BaseExpr).getAddress();
188
189  // Ask the ABI to load the callee.  Note that This is modified.
190  llvm::Value *Callee =
191    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
192
193  CallArgList Args;
194
195  QualType ThisType =
196    getContext().getPointerType(getContext().getTagDeclType(RD));
197
198  // Push the this ptr.
199  Args.push_back(std::make_pair(RValue::get(This), ThisType));
200
201  // And the rest of the call args
202  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
203  const FunctionProtoType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
204  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
205                  ReturnValue, Args);
206}
207
208RValue
209CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
210                                               const CXXMethodDecl *MD,
211                                               ReturnValueSlot ReturnValue) {
212  assert(MD->isInstance() &&
213         "Trying to emit a member call expr on a static method!");
214  if (MD->isCopyAssignment()) {
215    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
216    if (ClassDecl->hasTrivialCopyAssignment()) {
217      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
218             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
219      LValue LV = EmitLValue(E->getArg(0));
220      llvm::Value *This;
221      if (LV.isPropertyRef() || LV.isKVCRef()) {
222        llvm::Value *AggLoc  = CreateMemTemp(E->getArg(1)->getType());
223        EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
224        if (LV.isPropertyRef())
225          EmitObjCPropertySet(LV.getPropertyRefExpr(),
226                              RValue::getAggregate(AggLoc,
227                                                   false /*VolatileDest*/));
228        else
229          EmitObjCPropertySet(LV.getKVCRefExpr(),
230                              RValue::getAggregate(AggLoc,
231                                                   false /*VolatileDest*/));
232        return RValue::getAggregate(0, false);
233      }
234      else
235        This = LV.getAddress();
236
237      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
238      QualType Ty = E->getType();
239      EmitAggregateCopy(This, Src, Ty);
240      return RValue::get(This);
241    }
242  }
243
244  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
245  const llvm::Type *Ty =
246    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
247                                   FPT->isVariadic());
248  LValue LV = EmitLValue(E->getArg(0));
249  llvm::Value *This;
250  if (LV.isPropertyRef() || LV.isKVCRef()) {
251    QualType QT = E->getArg(0)->getType();
252    RValue RV =
253      LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
254                         : EmitLoadOfKVCRefLValue(LV, QT);
255    assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
256    This = RV.getAggregateAddr();
257  }
258  else
259    This = LV.getAddress();
260
261  llvm::Value *Callee;
262  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
263    Callee = BuildVirtualCall(MD, This, Ty);
264  else
265    Callee = CGM.GetAddrOfFunction(MD, Ty);
266
267  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
268                           E->arg_begin() + 1, E->arg_end());
269}
270
271void
272CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
273                                      const CXXConstructExpr *E) {
274  assert(Dest && "Must have a destination!");
275  const CXXConstructorDecl *CD = E->getConstructor();
276
277  // If we require zero initialization before (or instead of) calling the
278  // constructor, as can be the case with a non-user-provided default
279  // constructor, emit the zero initialization now.
280  if (E->requiresZeroInitialization())
281    EmitNullInitialization(Dest, E->getType());
282
283
284  // If this is a call to a trivial default constructor, do nothing.
285  if (CD->isTrivial() && CD->isDefaultConstructor())
286    return;
287
288  // Codegen optimization: elide the copy constructor and construct its first
289  // argument directly into the destination, if in fact that argument is a
290  // temporary object.
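  // For example, in 'X x = f();' the temporary returned by 'f()' can be
  // evaluated directly into 'x', and no copy constructor call is emitted.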
291  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
292    if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
293      EmitAggExpr(Arg, Dest, false);
294      return;
295    }
296  }
297
298  const ConstantArrayType *Array
299    = getContext().getAsConstantArrayType(E->getType());
300  if (Array) {
301    QualType BaseElementTy = getContext().getBaseElementType(Array);
302    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
303    BasePtr = llvm::PointerType::getUnqual(BasePtr);
304    llvm::Value *BaseAddrPtr =
305      Builder.CreateBitCast(Dest, BasePtr);
306
307    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
308                               E->arg_begin(), E->arg_end());
309  }
310  else {
311    CXXCtorType Type =
312      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
313      ? Ctor_Complete : Ctor_Base;
314    bool ForVirtualBase =
315      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
316
317    // Call the constructor.
318    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
319                           E->arg_begin(), E->arg_end());
320  }
321}
322
323/// Check whether the given operator new[] is the global placement
324/// operator new[].
325static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
326                                        const FunctionDecl *Fn) {
327  // Must be in global scope.  Note that allocation functions can't be
328  // declared in namespaces.
329  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
330    return false;
331
332  // Signature must be void *operator new[](size_t, void*).
333  // The size_t is common to all operator new[]s.
334  if (Fn->getNumParams() != 2)
335    return false;
336
337  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
338  return (ParamType == Ctx.VoidPtrTy);
339}
340
341static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
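// An array cookie is a small amount of storage that the Itanium C++ ABI places
// in front of a 'new[]'-allocated array to record the element count, so that
// 'delete[]' can destroy every element and find the start of the allocation.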
342                                        const CXXNewExpr *E) {
343  if (!E->isArray())
344    return CharUnits::Zero();
345
346  // No cookie is required if the new operator being used is
347  // ::operator new[](size_t, void*).
348  const FunctionDecl *OperatorNew = E->getOperatorNew();
349  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
350    return CharUnits::Zero();
351
352  return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
353}
354
355static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
356                                        CodeGenFunction &CGF,
357                                        const CXXNewExpr *E,
358                                        llvm::Value *&NumElements,
359                                        llvm::Value *&SizeWithoutCookie) {
360  QualType ElemType = E->getAllocatedType();
361
362  const llvm::IntegerType *SizeTy =
363    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
364
365  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
366
367  if (!E->isArray()) {
368    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
369    return SizeWithoutCookie;
370  }
371
372  // Figure out the cookie size.
373  CharUnits CookieSize = CalculateCookiePadding(CGF, E);
374
375  // Emit the array size expression.
376  // We multiply the size of all dimensions for NumElements.
377  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
378  NumElements = CGF.EmitScalarExpr(E->getArraySize());
379  assert(NumElements->getType() == SizeTy && "element count not a size_t");
380
381  uint64_t ArraySizeMultiplier = 1;
382  while (const ConstantArrayType *CAT
383             = CGF.getContext().getAsConstantArrayType(ElemType)) {
384    ElemType = CAT->getElementType();
385    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
386  }
387
388  llvm::Value *Size;
389
390  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
391  // Don't bloat the -O0 code.
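  // For example, on a target with 4-byte ints, 'new int[42]' folds to a
  // constant request of 168 bytes (plus any cookie), with the overflow check
  // done at compile time instead of via the intrinsics below.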
392  if (llvm::ConstantInt *NumElementsC =
393        dyn_cast<llvm::ConstantInt>(NumElements)) {
394    llvm::APInt NEC = NumElementsC->getValue();
395    unsigned SizeWidth = NEC.getBitWidth();
396
397    // Determine if there is an overflow here by doing an extended multiply.
398    NEC.zext(SizeWidth*2);
399    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
400    SC *= NEC;
401
402    if (!CookieSize.isZero()) {
403      // Save the current size without a cookie.  We don't care if an
404      // overflow's already happened because SizeWithoutCookie isn't
405      // used if the allocator returns null or throws, as it should
406      // always do on an overflow.
407      llvm::APInt SWC = SC;
408      SWC.trunc(SizeWidth);
409      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
410
411      // Add the cookie size.
412      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
413    }
414
415    if (SC.countLeadingZeros() >= SizeWidth) {
416      SC.trunc(SizeWidth);
417      Size = llvm::ConstantInt::get(SizeTy, SC);
418    } else {
419      // On overflow, produce a -1 so operator new throws.
420      Size = llvm::Constant::getAllOnesValue(SizeTy);
421    }
422
423    // Scale NumElements while we're at it.
424    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
425    NumElements = llvm::ConstantInt::get(SizeTy, N);
426
427  // Otherwise, we don't need to do an overflow-checked multiplication if
428  // we're multiplying by one.
429  } else if (TypeSize.isOne()) {
430    assert(ArraySizeMultiplier == 1);
431
432    Size = NumElements;
433
434    // If we need a cookie, add its size in with an overflow check.
435    // This is maybe a little paranoid.
436    if (!CookieSize.isZero()) {
437      SizeWithoutCookie = Size;
438
439      llvm::Value *CookieSizeV
440        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
441
442      const llvm::Type *Types[] = { SizeTy };
443      llvm::Value *UAddF
444        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
445      llvm::Value *AddRes
446        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
447
448      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
449      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
450      Size = CGF.Builder.CreateSelect(DidOverflow,
451                                      llvm::ConstantInt::get(SizeTy, -1),
452                                      Size);
453    }
454
455  // Otherwise use the int.umul.with.overflow intrinsic.
456  } else {
457    llvm::Value *OutermostElementSize
458      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
459
460    llvm::Value *NumOutermostElements = NumElements;
461
462    // Scale NumElements by the array size multiplier.  This might
463    // overflow, but only if the multiplication below also overflows,
464    // in which case this multiplication isn't used.
465    if (ArraySizeMultiplier != 1)
466      NumElements = CGF.Builder.CreateMul(NumElements,
467                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
468
469    // The requested size of the outermost array is non-constant.
470    // Multiply that by the static size of the elements of that array;
471    // on unsigned overflow, set the size to -1 to trigger an
472    // exception from the allocation routine.  This is sufficient to
473    // prevent buffer overruns from the allocator returning a
474    // seemingly valid pointer to insufficient space.  This idea comes
475    // originally from MSVC, and GCC has an open bug requesting
476    // similar behavior:
477    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
478    //
479    // This will not be sufficient for C++0x, which requires a
480    // specific exception class (std::bad_array_new_length).
481    // That will require ABI support that has not yet been specified.
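    // For example, on a 32-bit target 'new int[0x40000001]' needs 0x100000004
    // bytes; the truncated product would be a tiny 4-byte request, so we pass
    // (size_t)-1 instead and let the allocation function fail.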
482    const llvm::Type *Types[] = { SizeTy };
483    llvm::Value *UMulF
484      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
485    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
486                                                  OutermostElementSize);
487
488    // The overflow bit.
489    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
490
491    // The result of the multiplication.
492    Size = CGF.Builder.CreateExtractValue(MulRes, 0);
493
494    // If we have a cookie, we need to add that size in, too.
495    if (!CookieSize.isZero()) {
496      SizeWithoutCookie = Size;
497
498      llvm::Value *CookieSizeV
499        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
500      llvm::Value *UAddF
501        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
502      llvm::Value *AddRes
503        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
504
505      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
506
507      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
508      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
509    }
510
511    Size = CGF.Builder.CreateSelect(DidOverflow,
512                                    llvm::ConstantInt::get(SizeTy, -1),
513                                    Size);
514  }
515
516  if (CookieSize.isZero())
517    SizeWithoutCookie = Size;
518  else
519    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
520
521  return Size;
522}
523
524static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
525                                    llvm::Value *NewPtr) {
526
527  assert(E->getNumConstructorArgs() == 1 &&
528         "Can only have one argument to initializer of POD type.");
529
530  const Expr *Init = E->getConstructorArg(0);
531  QualType AllocType = E->getAllocatedType();
532
533  unsigned Alignment =
534    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
535  if (!CGF.hasAggregateLLVMType(AllocType))
536    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
537                          AllocType.isVolatileQualified(), Alignment,
538                          AllocType);
539  else if (AllocType->isAnyComplexType())
540    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
541                                AllocType.isVolatileQualified());
542  else
543    CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
544}
545
546void
547CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
548                                         llvm::Value *NewPtr,
549                                         llvm::Value *NumElements) {
550  // We have a POD type.
551  if (E->getNumConstructorArgs() == 0)
552    return;
553
554  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
555
556  // Create a temporary for the loop index and initialize it with 0.
557  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
558  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
559  Builder.CreateStore(Zero, IndexPtr);
560
561  // Start the loop with a block that tests the condition.
562  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
563  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
564
565  EmitBlock(CondBlock);
566
567  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
568
569  // Generate: if (loop-index < number-of-elements) fall through to the loop
570  // body; otherwise, go to the block after the for-loop.
571  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
572  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
573  // If the condition is true, execute the body.
574  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
575
576  EmitBlock(ForBody);
577
578  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
579  // Inside the loop body, emit the constructor call on the array element.
580  Counter = Builder.CreateLoad(IndexPtr);
581  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
582                                                   "arrayidx");
583  StoreAnyExprIntoOneUnit(*this, E, Address);
584
585  EmitBlock(ContinueBlock);
586
587  // Emit the increment of the loop counter.
588  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
589  Counter = Builder.CreateLoad(IndexPtr);
590  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
591  Builder.CreateStore(NextVal, IndexPtr);
592
593  // Finally, branch back up to the condition for the next iteration.
594  EmitBranch(CondBlock);
595
596  // Emit the fall-through block.
597  EmitBlock(AfterFor, true);
598}
599
600static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
601                           llvm::Value *NewPtr, llvm::Value *Size) {
602  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
603  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
604  if (NewPtr->getType() != BP)
605    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
606
607  CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
608                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
609                          Size,
610                    llvm::ConstantInt::get(CGF.Int32Ty,
611                                           CGF.getContext().getTypeAlign(T)/8),
612                          llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
613                                                 0));
614}
615
616static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
617                               llvm::Value *NewPtr,
618                               llvm::Value *NumElements,
619                               llvm::Value *AllocSizeWithoutCookie) {
620  if (E->isArray()) {
621    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
622      bool RequiresZeroInitialization = false;
623      if (Ctor->getParent()->hasTrivialConstructor()) {
624        // If new expression did not specify value-initialization, then there
625        // is no initialization.
626        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
627          return;
628
629        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
630          // Optimization: since zero initialization will just set the memory
631          // to all zeroes, generate a single memset to do it in one shot.
632          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
633                         AllocSizeWithoutCookie);
634          return;
635        }
636
637        RequiresZeroInitialization = true;
638      }
639
640      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
641                                     E->constructor_arg_begin(),
642                                     E->constructor_arg_end(),
643                                     RequiresZeroInitialization);
644      return;
645    } else if (E->getNumConstructorArgs() == 1 &&
646               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
647      // Optimization: since zero initialization will just set the memory
648      // to all zeroes, generate a single memset to do it in one shot.
649      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
650                     AllocSizeWithoutCookie);
651      return;
652    } else {
653      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
654      return;
655    }
656  }
657
658  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
659    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
660    // direct initialization. C++ [dcl.init]p5 requires that we
661    // zero-initialize storage if there are no user-declared constructors.
662    if (E->hasInitializer() &&
663        !Ctor->getParent()->hasUserDeclaredConstructor() &&
664        !Ctor->getParent()->isEmpty())
665      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
666
667    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
668                               NewPtr, E->constructor_arg_begin(),
669                               E->constructor_arg_end());
670
671    return;
672  }
673  // We have a POD type.
674  if (E->getNumConstructorArgs() == 0)
675    return;
676
677  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
678}
679
680namespace {
681  /// A cleanup to call the given 'operator delete' function upon
682  /// abnormal exit from a new expression.
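  /// For example, in 'new (a, b) T(...)', if T's constructor throws, the
  /// matching placement 'operator delete(void*, A, B)' (if one is declared)
  /// must be called with the same placement arguments before the exception
  /// propagates.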
683  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
684    size_t NumPlacementArgs;
685    const FunctionDecl *OperatorDelete;
686    llvm::Value *Ptr;
687    llvm::Value *AllocSize;
688
689    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
690
691  public:
692    static size_t getExtraSize(size_t NumPlacementArgs) {
693      return NumPlacementArgs * sizeof(RValue);
694    }
695
696    CallDeleteDuringNew(size_t NumPlacementArgs,
697                        const FunctionDecl *OperatorDelete,
698                        llvm::Value *Ptr,
699                        llvm::Value *AllocSize)
700      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
701        Ptr(Ptr), AllocSize(AllocSize) {}
702
703    void setPlacementArg(unsigned I, RValue Arg) {
704      assert(I < NumPlacementArgs && "index out of range");
705      getPlacementArgs()[I] = Arg;
706    }
707
708    void Emit(CodeGenFunction &CGF, bool IsForEH) {
709      const FunctionProtoType *FPT
710        = OperatorDelete->getType()->getAs<FunctionProtoType>();
711      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
712             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
713
714      CallArgList DeleteArgs;
715
716      // The first argument is always a void*.
717      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
718      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
719
720      // A member 'operator delete' can take an extra 'size_t' argument.
721      if (FPT->getNumArgs() == NumPlacementArgs + 2)
722        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
723
724      // Pass the rest of the arguments, which must match exactly.
725      for (unsigned I = 0; I != NumPlacementArgs; ++I)
726        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
727
728      // Call 'operator delete'.
729      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
730                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
731                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
732    }
733  };
734}
735
736llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
737  QualType AllocType = E->getAllocatedType();
738  if (AllocType->isArrayType())
739    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
740      AllocType = AType->getElementType();
741
742  FunctionDecl *NewFD = E->getOperatorNew();
743  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
744
745  CallArgList NewArgs;
746
747  // The allocation size is the first argument.
748  QualType SizeTy = getContext().getSizeType();
749
750  llvm::Value *NumElements = 0;
751  llvm::Value *AllocSizeWithoutCookie = 0;
752  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
753                                               *this, E, NumElements,
754                                               AllocSizeWithoutCookie);
755
756  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
757
758  // Emit the rest of the arguments.
759  // FIXME: Ideally, this should just use EmitCallArgs.
760  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
761
762  // First, use the types from the function type.
763  // We start at 1 here because the first argument (the allocation size)
764  // has already been emitted.
765  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
766    QualType ArgType = NewFTy->getArgType(i);
767
768    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
769           getTypePtr() ==
770           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
771           "type mismatch in call argument!");
772
773    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
774                                     ArgType));
775
776  }
777
778  // Either we've emitted all the call args, or we have a call to a
779  // variadic function.
780  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
781         "Extra arguments in non-variadic function!");
782
783  // If we still have any arguments, emit them using the type of the argument.
784  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
785       NewArg != NewArgEnd; ++NewArg) {
786    QualType ArgType = NewArg->getType();
787    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
788                                     ArgType));
789  }
790
791  // Emit the call to new.
792  RValue RV =
793    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
794             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
795
796  // If an allocation function is declared with an empty exception specification
797  // it returns null to indicate failure to allocate storage. [expr.new]p13.
798  // (We don't need to check for null when there's no new initializer and
799  // we're allocating a POD type).
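  // For example, 'new (std::nothrow) T' uses an allocation function declared
  // 'throw()', so we must branch around the initializer when it returns null.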
800  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
801    !(AllocType->isPODType() && !E->hasInitializer());
802
803  llvm::BasicBlock *NullCheckSource = 0;
804  llvm::BasicBlock *NewNotNull = 0;
805  llvm::BasicBlock *NewEnd = 0;
806
807  llvm::Value *NewPtr = RV.getScalarVal();
808  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
809
810  if (NullCheckResult) {
811    NullCheckSource = Builder.GetInsertBlock();
812    NewNotNull = createBasicBlock("new.notnull");
813    NewEnd = createBasicBlock("new.end");
814
815    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
816    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
817    EmitBlock(NewNotNull);
818  }
819
820  assert((AllocSize == AllocSizeWithoutCookie) ==
821         CalculateCookiePadding(*this, E).isZero());
822  if (AllocSize != AllocSizeWithoutCookie) {
823    assert(E->isArray());
824    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
825                                                   AllocType);
826  }
827
828  // If there's an operator delete, enter a cleanup to call it if an
829  // exception is thrown.
830  EHScopeStack::stable_iterator CallOperatorDelete;
831  if (E->getOperatorDelete()) {
832    CallDeleteDuringNew *Cleanup = EHStack
833      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
834                                                 E->getNumPlacementArgs(),
835                                                 E->getOperatorDelete(),
836                                                 NewPtr, AllocSize);
837    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
838      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
839    CallOperatorDelete = EHStack.stable_begin();
840  }
841
842  const llvm::Type *ElementPtrTy
843    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
844  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
845
846  if (E->isArray()) {
847    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
848
849    // NewPtr is a pointer to the base element type.  If we're
850    // allocating an array of arrays, we'll need to cast back to the
851    // array pointer type.
852    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
853    if (NewPtr->getType() != ResultTy)
854      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
855  } else {
856    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
857  }
858
859  // Deactivate the 'operator delete' cleanup if we finished
860  // initialization.
861  if (CallOperatorDelete.isValid())
862    DeactivateCleanupBlock(CallOperatorDelete);
863
864  if (NullCheckResult) {
865    Builder.CreateBr(NewEnd);
866    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
867    EmitBlock(NewEnd);
868
869    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
870    PHI->reserveOperandSpace(2);
871    PHI->addIncoming(NewPtr, NotNullSource);
872    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
873                     NullCheckSource);
874
875    NewPtr = PHI;
876  }
877
878  return NewPtr;
879}
880
881void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
882                                     llvm::Value *Ptr,
883                                     QualType DeleteTy) {
884  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
885
886  const FunctionProtoType *DeleteFTy =
887    DeleteFD->getType()->getAs<FunctionProtoType>();
888
889  CallArgList DeleteArgs;
890
891  // Check if we need to pass the size to the delete operator.
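  // A two-argument form such as 'operator delete(void*, size_t)', typically a
  // member usual deallocation function, receives the size of the deleted
  // object as its second argument.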
892  llvm::Value *Size = 0;
893  QualType SizeTy;
894  if (DeleteFTy->getNumArgs() == 2) {
895    SizeTy = DeleteFTy->getArgType(1);
896    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
897    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
898                                  DeleteTypeSize.getQuantity());
899  }
900
901  QualType ArgTy = DeleteFTy->getArgType(0);
902  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
903  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
904
905  if (Size)
906    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
907
908  // Emit the call to delete.
909  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
910           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
911           DeleteArgs, DeleteFD);
912}
913
914namespace {
915  /// Calls the given 'operator delete' on a single object.
916  struct CallObjectDelete : EHScopeStack::Cleanup {
917    llvm::Value *Ptr;
918    const FunctionDecl *OperatorDelete;
919    QualType ElementType;
920
921    CallObjectDelete(llvm::Value *Ptr,
922                     const FunctionDecl *OperatorDelete,
923                     QualType ElementType)
924      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
925
926    void Emit(CodeGenFunction &CGF, bool IsForEH) {
927      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
928    }
929  };
930}
931
932/// Emit the code for deleting a single object.
933static void EmitObjectDelete(CodeGenFunction &CGF,
934                             const FunctionDecl *OperatorDelete,
935                             llvm::Value *Ptr,
936                             QualType ElementType) {
937  // Find the destructor for the type, if applicable.  If the
938  // destructor is virtual, we'll just emit the vcall and return.
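  // (In the Itanium C++ ABI the deleting destructor both destroys the object
  //  and calls the appropriate 'operator delete', so no separate delete call
  //  is emitted on that path.)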
939  const CXXDestructorDecl *Dtor = 0;
940  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
941    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
942    if (!RD->hasTrivialDestructor()) {
943      Dtor = RD->getDestructor();
944
945      if (Dtor->isVirtual()) {
946        const llvm::Type *Ty =
947          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
948                                                               Dtor_Complete),
949                                         /*isVariadic=*/false);
950
951        llvm::Value *Callee
952          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
953        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
954                              0, 0);
955
956        // The dtor took care of deleting the object.
957        return;
958      }
959    }
960  }
961
962  // Make sure that we call delete even if the dtor throws.
963  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
964                                            Ptr, OperatorDelete, ElementType);
965
966  if (Dtor)
967    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
968                              /*ForVirtualBase=*/false, Ptr);
969
970  CGF.PopCleanupBlock();
971}
972
973namespace {
974  /// Calls the given 'operator delete' on an array of objects.
975  struct CallArrayDelete : EHScopeStack::Cleanup {
976    llvm::Value *Ptr;
977    const FunctionDecl *OperatorDelete;
978    llvm::Value *NumElements;
979    QualType ElementType;
980    CharUnits CookieSize;
981
982    CallArrayDelete(llvm::Value *Ptr,
983                    const FunctionDecl *OperatorDelete,
984                    llvm::Value *NumElements,
985                    QualType ElementType,
986                    CharUnits CookieSize)
987      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
988        ElementType(ElementType), CookieSize(CookieSize) {}
989
990    void Emit(CodeGenFunction &CGF, bool IsForEH) {
991      const FunctionProtoType *DeleteFTy =
992        OperatorDelete->getType()->getAs<FunctionProtoType>();
993      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
994
995      CallArgList Args;
996
997      // Pass the pointer as the first argument.
998      QualType VoidPtrTy = DeleteFTy->getArgType(0);
999      llvm::Value *DeletePtr
1000        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1001      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1002
1003      // Pass the original requested size as the second argument.
1004      if (DeleteFTy->getNumArgs() == 2) {
1005        QualType size_t = DeleteFTy->getArgType(1);
1006        const llvm::IntegerType *SizeTy
1007          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1008
1009        CharUnits ElementTypeSize =
1010          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1011
1012        // The size of an element, multiplied by the number of elements.
1013        llvm::Value *Size
1014          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1015        Size = CGF.Builder.CreateMul(Size, NumElements);
1016
1017        // Plus the size of the cookie if applicable.
1018        if (!CookieSize.isZero()) {
1019          llvm::Value *CookieSizeV
1020            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1021          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1022        }
1023
1024        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1025      }
1026
1027      // Emit the call to delete.
1028      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1029                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1030                   ReturnValueSlot(), Args, OperatorDelete);
1031    }
1032  };
1033}
1034
1035/// Emit the code for deleting an array of objects.
1036static void EmitArrayDelete(CodeGenFunction &CGF,
1037                            const FunctionDecl *OperatorDelete,
1038                            llvm::Value *Ptr,
1039                            QualType ElementType) {
1040  llvm::Value *NumElements = 0;
1041  llvm::Value *AllocatedPtr = 0;
1042  CharUnits CookieSize;
1043  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
1044                                      NumElements, AllocatedPtr, CookieSize);
1045
1046  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
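  // AllocatedPtr points at the start of the original allocation, before any
  // cookie; that is the pointer the cleanup passes to 'operator delete[]'.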
1047
1048  // Make sure that we call delete even if one of the dtors throws.
1049  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1050                                           AllocatedPtr, OperatorDelete,
1051                                           NumElements, ElementType,
1052                                           CookieSize);
1053
1054  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1055    if (!RD->hasTrivialDestructor()) {
1056      assert(NumElements && "ReadArrayCookie didn't find element count"
1057                            " for a class with destructor");
1058      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1059    }
1060  }
1061
1062  CGF.PopCleanupBlock();
1063}
1064
1065void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1066
1067  // Get at the argument before we performed the implicit conversion
1068  // to void*.
1069  const Expr *Arg = E->getArgument();
1070  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
1071    if (ICE->getCastKind() != CK_UserDefinedConversion &&
1072        ICE->getType()->isVoidPointerType())
1073      Arg = ICE->getSubExpr();
1074    else
1075      break;
1076  }
1077
1078  llvm::Value *Ptr = EmitScalarExpr(Arg);
1079
1080  // Null check the pointer.
1081  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1082  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1083
1084  llvm::Value *IsNull =
1085    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
1086                         "isnull");
1087
1088  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1089  EmitBlock(DeleteNotNull);
1090
1091  // We might be deleting a pointer to array.  If so, GEP down to the
1092  // first non-array element.
1093  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
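  // For example, if the argument has type 'A (*)[3][7]', we GEP with indices
  // 0,0,0 so that Ptr becomes an 'A*' pointing at the first element.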
1094  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1095  if (DeleteTy->isConstantArrayType()) {
1096    llvm::Value *Zero = Builder.getInt32(0);
1097    llvm::SmallVector<llvm::Value*,8> GEP;
1098
1099    GEP.push_back(Zero); // point at the outermost array
1100
1101    // For each layer of array type we're pointing at:
1102    while (const ConstantArrayType *Arr
1103             = getContext().getAsConstantArrayType(DeleteTy)) {
1104      // 1. Unpeel the array type.
1105      DeleteTy = Arr->getElementType();
1106
1107      // 2. GEP to the first element of the array.
1108      GEP.push_back(Zero);
1109    }
1110
1111    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
1112  }
1113
1114  assert(ConvertTypeForMem(DeleteTy) ==
1115         cast<llvm::PointerType>(Ptr->getType())->getElementType());
1116
1117  if (E->isArrayForm()) {
1118    EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1119  } else {
1120    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1121  }
1122
1123  EmitBlock(DeleteEnd);
1124}
1125
1126llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1127  QualType Ty = E->getType();
1128  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
1129
1130  if (E->isTypeOperand()) {
1131    llvm::Constant *TypeInfo =
1132      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1133    return Builder.CreateBitCast(TypeInfo, LTy);
1134  }
1135
1136  Expr *subE = E->getExprOperand();
1137  Ty = subE->getType();
1138  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
1139  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
1140  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1141    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1142    if (RD->isPolymorphic()) {
1143      // FIXME: if subE is an lvalue do
1144      LValue Obj = EmitLValue(subE);
1145      llvm::Value *This = Obj.getAddress();
1146      LTy = LTy->getPointerTo()->getPointerTo();
1147      llvm::Value *V = Builder.CreateBitCast(This, LTy);
1148      // We need to do a zero check for *p, unless it has NonNullAttr.
1149      // FIXME: PointerType->hasAttr<NonNullAttr>()
1150      bool CanBeZero = false;
1151      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
1152        if (UO->getOpcode() == UO_Deref)
1153          CanBeZero = true;
1154      if (CanBeZero) {
1155        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
1156        llvm::BasicBlock *ZeroBlock = createBasicBlock();
1157
1158        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
1159        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
1160                             NonZeroBlock, ZeroBlock);
1161        EmitBlock(ZeroBlock);
1162        /// Call __cxa_bad_typeid
1163        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
1164        const llvm::FunctionType *FTy;
1165        FTy = llvm::FunctionType::get(ResultType, false);
1166        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1167        Builder.CreateCall(F)->setDoesNotReturn();
1168        Builder.CreateUnreachable();
1169        EmitBlock(NonZeroBlock);
1170      }
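      // In the Itanium C++ ABI, the vtable slot at index -1 from the address
      // point holds a pointer to the std::type_info for the dynamic type.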
1171      V = Builder.CreateLoad(V, "vtable");
1172      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
1173      V = Builder.CreateLoad(V);
1174      return V;
1175    }
1176  }
1177  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
1178}
1179
1180llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
1181                                              const CXXDynamicCastExpr *DCE) {
1182  QualType SrcTy = DCE->getSubExpr()->getType();
1183  QualType DestTy = DCE->getTypeAsWritten();
1184  QualType InnerType = DestTy->getPointeeType();
1185
1186  const llvm::Type *LTy = ConvertType(DCE->getType());
1187
1188  bool CanBeZero = false;
1189  bool ToVoid = false;
1190  bool ThrowOnBad = false;
1191  if (DestTy->isPointerType()) {
1192    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
1193    CanBeZero = true;
1194    if (InnerType->isVoidType())
1195      ToVoid = true;
1196  } else {
1197    LTy = LTy->getPointerTo();
1198
1199    // FIXME: What if exceptions are disabled?
1200    ThrowOnBad = true;
1201  }
1202
1203  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
1204    SrcTy = SrcTy->getPointeeType();
1205  SrcTy = SrcTy.getUnqualifiedType();
1206
1207  if (DestTy->isPointerType() || DestTy->isReferenceType())
1208    DestTy = DestTy->getPointeeType();
1209  DestTy = DestTy.getUnqualifiedType();
1210
1211  llvm::BasicBlock *ContBlock = createBasicBlock();
1212  llvm::BasicBlock *NullBlock = 0;
1213  llvm::BasicBlock *NonZeroBlock = 0;
1214  if (CanBeZero) {
1215    NonZeroBlock = createBasicBlock();
1216    NullBlock = createBasicBlock();
1217    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
1218    EmitBlock(NonZeroBlock);
1219  }
1220
1221  llvm::BasicBlock *BadCastBlock = 0;
1222
1223  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
1224
1225  // See if this is a dynamic_cast(void*)
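  // dynamic_cast<void*> yields a pointer to the most-derived object; the
  // Itanium C++ ABI stores the "offset to top" at vtable slot -2, which we add
  // to the object pointer below.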
1226  if (ToVoid) {
1227    llvm::Value *This = V;
1228    V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
1229    V = Builder.CreateLoad(V, "vtable");
1230    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
1231    V = Builder.CreateLoad(V, "offset to top");
1232    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
1233    V = Builder.CreateInBoundsGEP(This, V);
1234    V = Builder.CreateBitCast(V, LTy);
1235  } else {
1236    /// Call __dynamic_cast
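    /// Per the Itanium C++ ABI, the runtime entry point is roughly:
    ///   void *__dynamic_cast(const void *sub,
    ///                        const __class_type_info *src,
    ///                        const __class_type_info *dst,
    ///                        ptrdiff_t src2dst_offset);
    /// where an offset hint of -1 means no hint is available.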
1237    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
1238    const llvm::FunctionType *FTy;
1239    std::vector<const llvm::Type*> ArgTys;
1240    const llvm::Type *PtrToInt8Ty
1241      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
1242    ArgTys.push_back(PtrToInt8Ty);
1243    ArgTys.push_back(PtrToInt8Ty);
1244    ArgTys.push_back(PtrToInt8Ty);
1245    ArgTys.push_back(PtrDiffTy);
1246    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
1247
1248    // FIXME: Calculate better hint.
1249    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
1250
1251    assert(SrcTy->isRecordType() && "Src type must be record type!");
1252    assert(DestTy->isRecordType() && "Dest type must be record type!");
1253
1254    llvm::Value *SrcArg
1255      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
1256    llvm::Value *DestArg
1257      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
1258
1259    V = Builder.CreateBitCast(V, PtrToInt8Ty);
1260    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
1261                            V, SrcArg, DestArg, hint);
1262    V = Builder.CreateBitCast(V, LTy);
1263
1264    if (ThrowOnBad) {
1265      BadCastBlock = createBasicBlock();
1266      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
1267      EmitBlock(BadCastBlock);
1268      /// Invoke __cxa_bad_cast
1269      ResultType = llvm::Type::getVoidTy(VMContext);
1270      const llvm::FunctionType *FBadTy;
1271      FBadTy = llvm::FunctionType::get(ResultType, false);
1272      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
1273      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
1274        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1275        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
1276        EmitBlock(Cont);
1277      } else {
1278        // FIXME: Does this ever make sense?
1279        Builder.CreateCall(F)->setDoesNotReturn();
1280      }
1281      Builder.CreateUnreachable();
1282    }
1283  }
1284
1285  if (CanBeZero) {
1286    Builder.CreateBr(ContBlock);
1287    EmitBlock(NullBlock);
1288    Builder.CreateBr(ContBlock);
1289  }
1290  EmitBlock(ContBlock);
1291  if (CanBeZero) {
1292    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
1293    PHI->reserveOperandSpace(2);
1294    PHI->addIncoming(V, NonZeroBlock);
1295    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
1296    V = PHI;
1297  }
1298
1299  return V;
1300}
1301