//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  EmitTypeCheck(TCK_MemberCall, This,
                getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden, and we can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', it can't be derived
  // from, and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable has record (non-pointer, non-reference) type, its
      // dynamic type is exactly its static type, so we can devirtualize.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
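  // For example:
  //
  //   struct A { virtual void f(); };
  //   struct B { A a; };
  //
  //   void g(B *b) {
  //     b->a.f();  // must be exactly A::f; no derived object can live here
  //   }
  //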
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
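// For example, MSVC accepts 'p->Type::Type()' to run a constructor on
// already-allocated storage.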
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static; emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the devirtualized
      // method is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is neither the best dynamic
      // class nor the class of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }

    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and requires an adjustment of the returned pointer.
    // For now we just avoid devirtualizing these covariant cases.
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
                                                 Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
                                                 cast<CXXConstructorDecl>(MD),
                                                 Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);

  llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
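  //
  // For example:
  //
  //   struct B { virtual void f(); };
  //   struct D : B { void f(); };
  //
  //   void g(D *d) {
  //     d->B::f();  // qualified: calls B::f directly, no vtable load
  //   }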
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();
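  // A member-pointer call '(obj.*memfn)(args)' uses opcode BO_PtrMemD;
  // '(ptr->*memfn)(args)' uses BO_PtrMemI.  The LHS is the object expression
  // and the RHS evaluates to the member function pointer.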

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, This, QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateCopy(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination
  // is already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME: Do we still need this for copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

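/// An array cookie is extra storage allocated in front of a 'new[]' array to
/// record the element count, so that 'delete[]' can run the right number of
/// destructors.  For example, under the Itanium C++ ABI, 'new A[n]' for an A
/// with a non-trivial destructor typically allocates
/// 'sizeof(size_t) + n * sizeof(A)' bytes and stores 'n' in the first word;
/// the exact layout is ABI-dependent, which is why we ask the CGCXXABI here.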
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.
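    //
    // For example, on a 64-bit target, 'new S[n]' with a signed 32-bit 'n'
    // and sizeof(S) == 24 sign-extends n to size_t, multiplies by 24 with
    // llvm.umul.with.overflow.i64, and, if a cookie is needed, adds its size
    // with llvm.uadd.with.overflow.i64, checking for overflow at each step.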

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);

    CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1,
                                               "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we now have to check whether
  // there is anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  // Cast to i8* so the memset operates on the raw allocation; the result of
  // the cast was previously discarded, which left this call as dead code.
  NewPtr = CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      if (Ctor->isTrivial()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     CCE->requiresZeroInitialization());
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
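  ///
  /// For example, given 'new (arena) T(args)' where 'arena' binds to a
  /// placement parameter, a throwing T constructor causes the matching
  /// placement 'operator delete' to be called with the same placement
  /// arguments that were passed to 'operator new'.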
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
                                                         allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}
1348
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
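/// If the class has a virtual destructor, the call is routed through the
/// vtable: the deleting destructor for plain 'delete', or the complete
/// destructor plus a cleanup that calls the global 'operator delete' for
/// '::delete'.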
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.
          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    Ptr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(
                         CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
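/// The original allocation pointer and the element count are recovered from
/// the array cookie via the C++ ABI, and the elements are destroyed before
/// 'operator delete[]' is called.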
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

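/// Emit a delete-expression.  The operand is null-checked first, since
/// deleting a null pointer is a no-op.  A sketch of the emitted control flow,
/// using the block names created below ('%T*' stands for the pointee type):
///
///   %isnull = icmp eq %T* %ptr, null
///   br i1 %isnull, label %delete.end, label %delete.notnull
/// delete.notnull:
///   ; destroy the object(s) and call 'operator delete'
///   br label %delete.end
/// delete.end: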
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

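/// Emit a call to __cxa_bad_typeid, which throws std::bad_typeid.  The call
/// is marked noreturn, so the block is terminated with 'unreachable'.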
static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

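/// Emit the vtable-based (dynamic) part of a typeid expression.  In the
/// Itanium C++ ABI the std::type_info pointer lives in the slot immediately
/// before the vtable's address point, hence the GEP by -1 below.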
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

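/// Emit a typeid expression.  Type operands and unevaluated expression
/// operands fold to the address of a constant RTTI descriptor; only a
/// potentially-evaluated glvalue of polymorphic class type needs a runtime
/// lookup through the vtable.  Illustrative sketch ('polyPtr' hypothetical):
///   typeid(int)       ->  bitcast of @_ZTIi; no code executed
///   typeid(*polyPtr)  ->  null check, then a load from the vtable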
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

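/// Emit the non-null path of a dynamic_cast.  A cast to 'void*' only needs
/// the offset-to-top slot of the vtable (slot -2 in the Itanium ABI);
/// anything else becomes a call to the __dynamic_cast runtime function
/// declared above.  Illustrative sketch ('Base' and 'Derived' hypothetical):
///   dynamic_cast<void*>(p)    ->  p + vtable[-2]
///   dynamic_cast<Derived*>(p) ->  __dynamic_cast(p, &_ZTI4Base,
///                                                &_ZTI7Derived, hint = -1)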
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

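/// Emit a dynamic_cast that is statically known to fail: the pointer form
/// folds to a null pointer, while the reference form must unconditionally
/// call __cxa_bad_cast.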
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

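/// Emit a dynamic_cast expression.  In the pointer case a null operand
/// yields a null result without invoking the runtime, so the two paths are
/// merged with a phi.  A sketch of the control flow, using the block names
/// created below ('%T*' stands for the source pointer type):
///
///   %isnull = icmp eq %T* %src, null
///   br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
/// dynamic_cast.notnull:
///   ; EmitDynamicCastCall
///   br label %dynamic_cast.end
/// dynamic_cast.null:
///   br label %dynamic_cast.end
/// dynamic_cast.end:
///   %result = phi [ %cast, %dynamic_cast.notnull ], [ null, %dynamic_cast.null ]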
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

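/// Emit a lambda expression by initializing the fields of the closure object
/// in Slot, one field per capture, exactly as a constructor's member
/// initializers would.  For example (a sketch), given
///   [x, &y](int n) { return x + y + n; }
/// the closure class has a field for the copy of 'x' and a reference field
/// for 'y'; both are initialized here.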
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture.
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
