CGExprCXX.cpp revision 972edf0534d8a50f87fac1d0ff34eb22f593df11
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
                                                     FPT->getExtInfo(),
                                                     required),
                  Callee, ReturnValue, Args, MD);
}
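
// For illustration, a hypothetical case where the VTT parameter is used:
//
//   struct V { };
//   struct A : virtual V { A(); };  // A's base-object ctor takes a VTT
//
// Under the Itanium C++ ABI, the VTT ("virtual table table") is only passed
// when calling base-object constructors or destructors of classes with
// virtual bases; ordinary member calls pass VTT == 0, as the callers below do.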

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}
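
// For illustration, a hypothetical example: given
//
//   struct A { virtual void f(); };
//   struct B : A { };
//   B *b = ...;
//   static_cast<A*>(b)->f();
//
// the derived-to-base cast is skipped above, so the most derived class
// computed here is B, not A.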

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
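
// For illustration: this strips parens, no-op casts, and GNU __extension__
// markers, e.g. turning a hypothetical '(__extension__ (b))' back into the
// plain reference to 'b'.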

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOptions().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden, and we can therefore devirtualize calls to it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', the member function
  // can't be overridden, and we can therefore devirtualize the call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable has record type, we know the exact dynamic type
      // and can devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}
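
// For illustration, a hypothetical case hitting the VarDecl check above:
//
//   struct A { virtual void f(); };
//   void g() {
//     A a;
//     a.f();  // devirtualized: emitted as a direct call to A::f
//   }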

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
                                                 Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
                                                 cast<CXXConstructorDecl>(MD),
                                                 Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);

  llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier() &&
                        !canDevirtualizeMemberFunctionCalls(getContext(),
                                                            ME->getBase(), MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
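
// For illustration, hypothetical calls taking the two paths above:
//
//   struct S { virtual int get(); };
//   int use(S *s)  { return s->get(); }    // virtual: via BuildVirtualCall
//   int use2(S *s) { return s->S::get(); } // qualified: direct call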

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
                  ReturnValue, Args);
}
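
// For illustration, hypothetical member-pointer calls matching the two
// opcodes above:
//
//   int (S::*pmf)() = &S::get;
//   (s.*pmf)();   // BO_PtrMemD: 'this' comes from the lvalue 's'
//   (p->*pmf)();  // BO_PtrMemI: 'this' is the scalar pointer 'p'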

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateCopy(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}
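
// For illustration: for an overloaded operator call such as 'a = b', arg 0
// is the implicit object argument ('a', which becomes 'this'), so the
// remaining call arguments start at E->arg_begin() + 1 above.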

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}
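
// For illustration, a hypothetical base that takes the memcpy path above:
//
//   struct B { int B::*member; };  // null data-member pointer is all-ones
//
// Zero is not the null representation for pointers to data members (the
// Itanium C++ ABI uses -1), so a plain memset to zero would be wrong here.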

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination
  // is already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      assert(0 && "Delegating constructor should not need zeroing");
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}
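
// For illustration, a hypothetical call where the elision check above fires:
//
//   struct T { T(); T(const T &); };
//   T make() { return T(); }  // T() is built directly into the return slot;
//                             // no copy constructor call is emitted.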

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
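
// For illustration: an array cookie is extra storage placed before the array
// elements to record the element count so that delete[] can recover it.
// E.g., for a hypothetical
//
//   struct D { ~D(); };
//   D *p = new D[n];
//
// the Itanium C++ ABI allocates room for a cookie holding 'n' followed by
// the n * sizeof(D) bytes of elements.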

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the sizes of all array dimensions into NumElements, e.g. for
  // 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to four conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 4) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                                      llvm::ConstantInt::get(CGF.SizeTy, 0));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
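
// For illustration, a hypothetical 'new int[n][3]' on a 64-bit target with
// 4-byte int: typeSizeMultiplier is 12 (3 * sizeof(int)) and
// arraySizeMultiplier is 3, so the code above computes
//
//   numElements = n * 3
//   size        = n * 12   (via llvm.umul.with.overflow)
//
// and, if the multiply overflows, selects an all-ones size so that operator
// new will fail.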

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {
  const Expr *Init = E->getInitializer();
  QualType AllocType = E->getAllocatedType();

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);

    CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  // Check if the number of elements is constant.
  bool checkZero = true;
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If it's constant zero, skip the whole loop.
    if (constNum->isZero()) return;

    checkZero = false;
  }

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If we need to check for zero, do so now.
  if (checkZero) {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(beginPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(beginPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(beginPtr, entryBB);

  // Enter a partial-destruction cleanup if necessary.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, E, curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}
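
// For illustration, a rough sketch of the loop emitted above:
//
//   new.loop:
//     %array.cur = phi [ %begin, entry ], [ %array.next, ... ]
//     ; initialize *%array.cur
//     %array.next = getelementptr %array.cur, 1
//     %array.atend = icmp eq %array.next, %array.end
//     br %array.atend, label %new.loop.end, label %new.loop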

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}
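
// For illustration, a hypothetical case these cleanups exist for: C++
// requires the matching deallocation function to be called, with the same
// placement arguments, if initialization throws.
//
//   void *operator new(size_t, Arena &);
//   void operator delete(void *, Arena &);  // called if T::T() throws
//   T *t = new (arena) T();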

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
                                                     allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}
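
// For illustration, a hypothetical new-expression taking the null-checked
// path above:
//
//   struct X { X(); };
//   X *x = new (std::nothrow) X();  // allocator may return null
//
// The constructor runs only on the not-null branch, and the result is a phi
// of the constructed pointer and null.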

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}
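
// For illustration, a hypothetical class-specific operator delete taking the
// two-argument form handled above:
//
//   struct S {
//     void operator delete(void *p, size_t size);  // size passed explicitly
//   };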

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.
          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    Ptr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(
                      CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOptions().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}
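
// For illustration, a hypothetical delete through a virtual destructor,
// which takes the vcall path above:
//
//   struct Base { virtual ~Base(); };
//   void f(Base *b) { delete b; }  // virtual call to the deleting dtor
//
// The Dtor_Deleting entry destroys the most derived object and frees its
// storage, so no separate call to operator delete is emitted here.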

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
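  // Ask the ABI to decode the array cookie: it fills in the element count
  // that operator new[] stored in front of the array, the pointer to the
  // start of the original allocation, and the size of the cookie itself.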
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

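  // C++ [expr.delete]p2: a delete expression whose operand is a null pointer
  // has no effect, so we must skip both the destructor call and the call to
  // operator delete when the pointer is null.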
  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

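  // In the Itanium C++ ABI, the pointer to the std::type_info object lives in
  // the vtable slot immediately preceding the address point, hence the GEP to
  // index -1 below.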
  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

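      // In the Itanium C++ ABI the offset-to-top field is stored two
      // ptrdiff_t slots before the vtable's address point (the type_info
      // pointer occupies slot -1), which is why the GEP below uses -2.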
      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

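  // The src2dst_offset parameter of __dynamic_cast is a hint: when the static
  // offset of the source subobject within the destination type is known it
  // can be passed to speed up the search, and -1 means no hint is available.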
  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

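  // If the operand was null-checked, merge the two paths: the runtime result
  // from the not-null block and a null constant from the null block.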
  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);

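  // The closure class has one field per capture, in the order the captures
  // were written; walk the fields and the capture initializers in lockstep,
  // emitting each field's initializer into the slot.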
  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
      i != e; ++i, ++CurField) {
    // Emit the initializer for this capture's field.
    LValue LV = EmitLValueForFieldInitialization(Slot.getAddr(), *CurField, 0);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}