CGExprCXX.cpp revision 2726267f094a0c1f5ac5b501ec5a9898c58876bf
1//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with code generation of C++ expressions
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/Frontend/CodeGenOptions.h"
15#include "CodeGenFunction.h"
16#include "CGCXXABI.h"
17#include "CGObjCRuntime.h"
18#include "CGDebugInfo.h"
19#include "llvm/Intrinsics.h"
20using namespace clang;
21using namespace CodeGen;
22
23RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
24                                          llvm::Value *Callee,
25                                          ReturnValueSlot ReturnValue,
26                                          llvm::Value *This,
27                                          llvm::Value *VTT,
28                                          CallExpr::const_arg_iterator ArgBeg,
29                                          CallExpr::const_arg_iterator ArgEnd) {
30  assert(MD->isInstance() &&
31         "Trying to emit a member call expr on a static method!");
32
33  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
34
35  CallArgList Args;
36
37  // Push the this ptr.
38  Args.push_back(std::make_pair(RValue::get(This),
39                                MD->getThisType(getContext())));
40
41  // If there is a VTT parameter, emit it.
42  if (VTT) {
43    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44    Args.push_back(std::make_pair(RValue::get(VTT), T));
45  }
46
47  // And the rest of the call args
48  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
49
50  QualType ResultType = FPT->getResultType();
51  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
52                                                 FPT->getExtInfo()),
53                  Callee, ReturnValue, Args, MD);
54}
55
56/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
57/// expr can be devirtualized.
58static bool canDevirtualizeMemberFunctionCalls(const Expr *Base,
59                                               const CXXMethodDecl *MD) {
60
61  // If the member function has the "final" attribute, we know that it can't be
62  // overridden and can therefore devirtualize it.
63  if (MD->hasAttr<FinalAttr>())
64    return true;
65
66  // Similarly, if the class itself has the "final" attribute it can't be
67  // overridden and we can therefore devirtualize the member function call.
68  if (MD->getParent()->hasAttr<FinalAttr>())
69    return true;
70
71  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
72    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
73      // This is a record decl. We know the type and can devirtualize it.
74      return VD->getType()->isRecordType();
75    }
76
77    return false;
78  }
79
80  // We can always devirtualize calls on temporary object expressions.
81  if (isa<CXXConstructExpr>(Base))
82    return true;
83
84  // And calls on bound temporaries.
85  if (isa<CXXBindTemporaryExpr>(Base))
86    return true;
87
88  // Check if this is a call expr that returns a record type.
89  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
90    return CE->getCallReturnType()->isRecordType();
91
92  // We can't devirtualize the call.
93  return false;
94}
95
// Note: This function also emits constructor calls, to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  // Member-pointer calls ('obj.*p(...)' / 'obj->*p(...)') have a
  // BinaryOperator callee and take a separate emission path.
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  // Under -flimit-debug-info, force emission of the debug type for the
  // pointee class: this call proves the complete type is needed here.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  // Trivial members need no real call; emit their effect (if any) inline.
  if (MD->isTrivial()) {
    // Trivial destructors and trivial default constructors are no-ops.
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    // Trivial copy constructor (reachable via the MSVC explicit constructor
    // call extension): emit a synthesized copy instead of a call.
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.  Destructor and constructor
  // calls use their Complete variant here.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                           Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  if (!getContext().getLangOptions().AppleKext)
    UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                     && !canDevirtualizeMemberFunctionCalls(ME->getBase(), MD);
  else
    // -fapple-kext requires vtable dispatch for all virtual methods.
    UseVirtualCall = MD->isVirtual();

  // Resolve the callee: vtable load for virtual dispatch, otherwise the
  // direct function address.
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    // Constructors are never dispatched virtually.
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    if (getContext().getLangOptions().AppleKext &&
        ME->hasQualifier()) {
      // Qualified virtual calls under -fapple-kext still go through a
      // (kext-specific) vtable lookup.
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), This, Ty);
    }
    else
      Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
206
207RValue
208CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
209                                              ReturnValueSlot ReturnValue) {
210  const BinaryOperator *BO =
211      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
212  const Expr *BaseExpr = BO->getLHS();
213  const Expr *MemFnExpr = BO->getRHS();
214
215  const MemberPointerType *MPT =
216    MemFnExpr->getType()->getAs<MemberPointerType>();
217
218  const FunctionProtoType *FPT =
219    MPT->getPointeeType()->getAs<FunctionProtoType>();
220  const CXXRecordDecl *RD =
221    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
222
223  // Get the member function pointer.
224  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
225
226  // Emit the 'this' pointer.
227  llvm::Value *This;
228
229  if (BO->getOpcode() == BO_PtrMemI)
230    This = EmitScalarExpr(BaseExpr);
231  else
232    This = EmitLValue(BaseExpr).getAddress();
233
234  // Ask the ABI to load the callee.  Note that This is modified.
235  llvm::Value *Callee =
236    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(CGF, This, MemFnPtr, MPT);
237
238  CallArgList Args;
239
240  QualType ThisType =
241    getContext().getPointerType(getContext().getTagDeclType(RD));
242
243  // Push the this ptr.
244  Args.push_back(std::make_pair(RValue::get(This), ThisType));
245
246  // And the rest of the call args
247  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
248  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
249  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
250                  ReturnValue, Args);
251}
252
253RValue
254CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
255                                               const CXXMethodDecl *MD,
256                                               ReturnValueSlot ReturnValue) {
257  assert(MD->isInstance() &&
258         "Trying to emit a member call expr on a static method!");
259  LValue LV = EmitLValue(E->getArg(0));
260  llvm::Value *This = LV.getAddress();
261
262  if (MD->isCopyAssignmentOperator()) {
263    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
264    if (ClassDecl->hasTrivialCopyAssignment()) {
265      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
266             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
267      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
268      QualType Ty = E->getType();
269      EmitAggregateCopy(This, Src, Ty);
270      return RValue::get(This);
271    }
272  }
273
274  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
275  const llvm::Type *Ty =
276    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
277                                   FPT->isVariadic());
278  llvm::Value *Callee;
279  if (MD->isVirtual() &&
280      (getContext().getLangOptions().AppleKext ||
281       !canDevirtualizeMemberFunctionCalls(E->getArg(0), MD)))
282    Callee = BuildVirtualCall(MD, This, Ty);
283  else
284    Callee = CGM.GetAddrOfFunction(MD, Ty);
285
286  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
287                           E->arg_begin() + 1, E->arg_end());
288}
289
290void
291CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
292                                      AggValueSlot Dest) {
293  assert(!Dest.isIgnored() && "Must have a destination!");
294  const CXXConstructorDecl *CD = E->getConstructor();
295
296  // If we require zero initialization before (or instead of) calling the
297  // constructor, as can be the case with a non-user-provided default
298  // constructor, emit the zero initialization now.
299  if (E->requiresZeroInitialization())
300    EmitNullInitialization(Dest.getAddr(), E->getType());
301
302  // If this is a call to a trivial default constructor, do nothing.
303  if (CD->isTrivial() && CD->isDefaultConstructor())
304    return;
305
306  // Elide the constructor if we're constructing from a temporary.
307  // The temporary check is required because Sema sets this on NRVO
308  // returns.
309  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
310    assert(getContext().hasSameUnqualifiedType(E->getType(),
311                                               E->getArg(0)->getType()));
312    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
313      EmitAggExpr(E->getArg(0), Dest);
314      return;
315    }
316  }
317
318  const ConstantArrayType *Array
319    = getContext().getAsConstantArrayType(E->getType());
320  if (Array) {
321    QualType BaseElementTy = getContext().getBaseElementType(Array);
322    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
323    BasePtr = llvm::PointerType::getUnqual(BasePtr);
324    llvm::Value *BaseAddrPtr =
325      Builder.CreateBitCast(Dest.getAddr(), BasePtr);
326
327    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
328                               E->arg_begin(), E->arg_end());
329  }
330  else {
331    CXXCtorType Type =
332      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
333      ? Ctor_Complete : Ctor_Base;
334    bool ForVirtualBase =
335      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
336
337    // Call the constructor.
338    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
339                           E->arg_begin(), E->arg_end());
340  }
341}
342
343void
344CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
345                                            llvm::Value *Src,
346                                            const Expr *Exp) {
347  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
348    Exp = E->getSubExpr();
349  assert(isa<CXXConstructExpr>(Exp) &&
350         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
351  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
352  const CXXConstructorDecl *CD = E->getConstructor();
353  RunCleanupsScope Scope(*this);
354
355  // If we require zero initialization before (or instead of) calling the
356  // constructor, as can be the case with a non-user-provided default
357  // constructor, emit the zero initialization now.
358  // FIXME. Do I still need this for a copy ctor synthesis?
359  if (E->requiresZeroInitialization())
360    EmitNullInitialization(Dest, E->getType());
361
362  assert(!getContext().getAsConstantArrayType(E->getType())
363         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
364  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
365                                 E->arg_begin(), E->arg_end());
366}
367
368/// Check whether the given operator new[] is the global placement
369/// operator new[].
370static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
371                                        const FunctionDecl *Fn) {
372  // Must be in global scope.  Note that allocation functions can't be
373  // declared in namespaces.
374  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
375    return false;
376
377  // Signature must be void *operator new[](size_t, void*).
378  // The size_t is common to all operator new[]s.
379  if (Fn->getNumParams() != 2)
380    return false;
381
382  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
383  return (ParamType == Ctx.VoidPtrTy);
384}
385
386static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
387                                        const CXXNewExpr *E) {
388  if (!E->isArray())
389    return CharUnits::Zero();
390
391  // No cookie is required if the new operator being used is
392  // ::operator new[](size_t, void*).
393  const FunctionDecl *OperatorNew = E->getOperatorNew();
394  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
395    return CharUnits::Zero();
396
397  return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
398}
399
400static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
401                                        CodeGenFunction &CGF,
402                                        const CXXNewExpr *E,
403                                        llvm::Value *&NumElements,
404                                        llvm::Value *&SizeWithoutCookie) {
405  QualType ElemType = E->getAllocatedType();
406
407  const llvm::IntegerType *SizeTy =
408    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
409
410  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
411
412  if (!E->isArray()) {
413    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
414    return SizeWithoutCookie;
415  }
416
417  // Figure out the cookie size.
418  CharUnits CookieSize = CalculateCookiePadding(CGF, E);
419
420  // Emit the array size expression.
421  // We multiply the size of all dimensions for NumElements.
422  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
423  NumElements = CGF.EmitScalarExpr(E->getArraySize());
424  assert(NumElements->getType() == SizeTy && "element count not a size_t");
425
426  uint64_t ArraySizeMultiplier = 1;
427  while (const ConstantArrayType *CAT
428             = CGF.getContext().getAsConstantArrayType(ElemType)) {
429    ElemType = CAT->getElementType();
430    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
431  }
432
433  llvm::Value *Size;
434
435  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
436  // Don't bloat the -O0 code.
437  if (llvm::ConstantInt *NumElementsC =
438        dyn_cast<llvm::ConstantInt>(NumElements)) {
439    llvm::APInt NEC = NumElementsC->getValue();
440    unsigned SizeWidth = NEC.getBitWidth();
441
442    // Determine if there is an overflow here by doing an extended multiply.
443    NEC = NEC.zext(SizeWidth*2);
444    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
445    SC *= NEC;
446
447    if (!CookieSize.isZero()) {
448      // Save the current size without a cookie.  We don't care if an
449      // overflow's already happened because SizeWithoutCookie isn't
450      // used if the allocator returns null or throws, as it should
451      // always do on an overflow.
452      llvm::APInt SWC = SC.trunc(SizeWidth);
453      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
454
455      // Add the cookie size.
456      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
457    }
458
459    if (SC.countLeadingZeros() >= SizeWidth) {
460      SC = SC.trunc(SizeWidth);
461      Size = llvm::ConstantInt::get(SizeTy, SC);
462    } else {
463      // On overflow, produce a -1 so operator new throws.
464      Size = llvm::Constant::getAllOnesValue(SizeTy);
465    }
466
467    // Scale NumElements while we're at it.
468    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
469    NumElements = llvm::ConstantInt::get(SizeTy, N);
470
471  // Otherwise, we don't need to do an overflow-checked multiplication if
472  // we're multiplying by one.
473  } else if (TypeSize.isOne()) {
474    assert(ArraySizeMultiplier == 1);
475
476    Size = NumElements;
477
478    // If we need a cookie, add its size in with an overflow check.
479    // This is maybe a little paranoid.
480    if (!CookieSize.isZero()) {
481      SizeWithoutCookie = Size;
482
483      llvm::Value *CookieSizeV
484        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
485
486      const llvm::Type *Types[] = { SizeTy };
487      llvm::Value *UAddF
488        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
489      llvm::Value *AddRes
490        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
491
492      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
493      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
494      Size = CGF.Builder.CreateSelect(DidOverflow,
495                                      llvm::ConstantInt::get(SizeTy, -1),
496                                      Size);
497    }
498
499  // Otherwise use the int.umul.with.overflow intrinsic.
500  } else {
501    llvm::Value *OutermostElementSize
502      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
503
504    llvm::Value *NumOutermostElements = NumElements;
505
506    // Scale NumElements by the array size multiplier.  This might
507    // overflow, but only if the multiplication below also overflows,
508    // in which case this multiplication isn't used.
509    if (ArraySizeMultiplier != 1)
510      NumElements = CGF.Builder.CreateMul(NumElements,
511                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
512
513    // The requested size of the outermost array is non-constant.
514    // Multiply that by the static size of the elements of that array;
515    // on unsigned overflow, set the size to -1 to trigger an
516    // exception from the allocation routine.  This is sufficient to
517    // prevent buffer overruns from the allocator returning a
518    // seemingly valid pointer to insufficient space.  This idea comes
519    // originally from MSVC, and GCC has an open bug requesting
520    // similar behavior:
521    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
522    //
523    // This will not be sufficient for C++0x, which requires a
524    // specific exception class (std::bad_array_new_length).
525    // That will require ABI support that has not yet been specified.
526    const llvm::Type *Types[] = { SizeTy };
527    llvm::Value *UMulF
528      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
529    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
530                                                  OutermostElementSize);
531
532    // The overflow bit.
533    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
534
535    // The result of the multiplication.
536    Size = CGF.Builder.CreateExtractValue(MulRes, 0);
537
538    // If we have a cookie, we need to add that size in, too.
539    if (!CookieSize.isZero()) {
540      SizeWithoutCookie = Size;
541
542      llvm::Value *CookieSizeV
543        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
544      llvm::Value *UAddF
545        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
546      llvm::Value *AddRes
547        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
548
549      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
550
551      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
552      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
553    }
554
555    Size = CGF.Builder.CreateSelect(DidOverflow,
556                                    llvm::ConstantInt::get(SizeTy, -1),
557                                    Size);
558  }
559
560  if (CookieSize.isZero())
561    SizeWithoutCookie = Size;
562  else
563    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
564
565  return Size;
566}
567
568static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
569                                    llvm::Value *NewPtr) {
570
571  assert(E->getNumConstructorArgs() == 1 &&
572         "Can only have one argument to initializer of POD type.");
573
574  const Expr *Init = E->getConstructorArg(0);
575  QualType AllocType = E->getAllocatedType();
576
577  unsigned Alignment =
578    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
579  if (!CGF.hasAggregateLLVMType(AllocType))
580    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
581                          AllocType.isVolatileQualified(), Alignment,
582                          AllocType);
583  else if (AllocType->isAnyComplexType())
584    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
585                                AllocType.isVolatileQualified());
586  else {
587    AggValueSlot Slot
588      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
589    CGF.EmitAggExpr(Init, Slot);
590  }
591}
592
593void
594CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
595                                         llvm::Value *NewPtr,
596                                         llvm::Value *NumElements) {
597  // We have a POD type.
598  if (E->getNumConstructorArgs() == 0)
599    return;
600
601  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
602
603  // Create a temporary for the loop index and initialize it with 0.
604  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
605  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
606  Builder.CreateStore(Zero, IndexPtr);
607
608  // Start the loop with a block that tests the condition.
609  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
610  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
611
612  EmitBlock(CondBlock);
613
614  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
615
616  // Generate: if (loop-index < number-of-elements fall to the loop body,
617  // otherwise, go to the block after the for-loop.
618  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
619  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
620  // If the condition is true, execute the body.
621  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
622
623  EmitBlock(ForBody);
624
625  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
626  // Inside the loop body, emit the constructor call on the array element.
627  Counter = Builder.CreateLoad(IndexPtr);
628  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
629                                                   "arrayidx");
630  StoreAnyExprIntoOneUnit(*this, E, Address);
631
632  EmitBlock(ContinueBlock);
633
634  // Emit the increment of the loop counter.
635  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
636  Counter = Builder.CreateLoad(IndexPtr);
637  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
638  Builder.CreateStore(NextVal, IndexPtr);
639
640  // Finally, branch back up to the condition for the next iteration.
641  EmitBranch(CondBlock);
642
643  // Emit the fall-through block.
644  EmitBlock(AfterFor, true);
645}
646
647static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
648                           llvm::Value *NewPtr, llvm::Value *Size) {
649  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
650  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
651  if (NewPtr->getType() != BP)
652    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
653
654  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
655  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
656                           Alignment.getQuantity(), false);
657}
658
/// Emit the initialization of storage produced by a new-expression.
/// NewPtr points at the first object; NumElements is the flattened element
/// count for array news; AllocSizeWithoutCookie is the storage size in
/// bytes excluding any array cookie (used for the memset fast path).
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        // Not memset-able: fall through to the per-element constructor
        // loop, asking it to zero-fill each element first.
        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // POD array with an explicit initializer: loop over the elements.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type with no constructor; at most one initializer value.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
722
namespace {
/// A utility class for saving an rvalue so it can be reloaded later
/// (see SaveRValue / RestoreRValue below).
class SavedRValue {
public:
  /// Discriminates how the saved value must be restored: "Literal" kinds
  /// hold the value directly; "Address" kinds hold a spill slot to load
  /// from; Complex always holds the address of a spilled {real, imag} pair.
  enum Kind { ScalarLiteral, ScalarAddress,
              AggregateLiteral, AggregateAddress,
              Complex };

private:
  llvm::Value *Value;  // The saved value, or the slot it was spilled to.
  Kind K;              // How Value must be interpreted on restore.

  SavedRValue(llvm::Value *V, Kind K) : Value(V), K(K) {}

public:
  // Default construction leaves the members uninitialized; callers must
  // assign a properly-constructed SavedRValue before use.
  SavedRValue() {}

  static SavedRValue forScalarLiteral(llvm::Value *V) {
    return SavedRValue(V, ScalarLiteral);
  }

  static SavedRValue forScalarAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, ScalarAddress);
  }

  static SavedRValue forAggregateLiteral(llvm::Value *V) {
    return SavedRValue(V, AggregateLiteral);
  }

  static SavedRValue forAggregateAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, AggregateAddress);
  }

  static SavedRValue forComplexAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, Complex);
  }

  Kind getKind() const { return K; }
  llvm::Value *getValue() const { return Value; }
};
} // end anonymous namespace
764
765/// Given an r-value, perform the code necessary to make sure that a
766/// future RestoreRValue will be able to load the value without
767/// domination concerns.
768static SavedRValue SaveRValue(CodeGenFunction &CGF, RValue RV) {
769  if (RV.isScalar()) {
770    llvm::Value *V = RV.getScalarVal();
771
772    // These automatically dominate and don't need to be saved.
773    if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
774      return SavedRValue::forScalarLiteral(V);
775
776    // Everything else needs an alloca.
777    llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
778    CGF.Builder.CreateStore(V, Addr);
779    return SavedRValue::forScalarAddress(Addr);
780  }
781
782  if (RV.isComplex()) {
783    CodeGenFunction::ComplexPairTy V = RV.getComplexVal();
784    const llvm::Type *ComplexTy =
785      llvm::StructType::get(CGF.getLLVMContext(),
786                            V.first->getType(), V.second->getType(),
787                            (void*) 0);
788    llvm::Value *Addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
789    CGF.StoreComplexToAddr(V, Addr, /*volatile*/ false);
790    return SavedRValue::forComplexAddress(Addr);
791  }
792
793  assert(RV.isAggregate());
794  llvm::Value *V = RV.getAggregateAddr(); // TODO: volatile?
795  if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
796    return SavedRValue::forAggregateLiteral(V);
797
798  llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
799  CGF.Builder.CreateStore(V, Addr);
800  return SavedRValue::forAggregateAddress(Addr);
801}
802
803/// Given a saved r-value produced by SaveRValue, perform the code
804/// necessary to restore it to usability at the current insertion
805/// point.
806static RValue RestoreRValue(CodeGenFunction &CGF, SavedRValue RV) {
807  switch (RV.getKind()) {
808  case SavedRValue::ScalarLiteral:
809    return RValue::get(RV.getValue());
810  case SavedRValue::ScalarAddress:
811    return RValue::get(CGF.Builder.CreateLoad(RV.getValue()));
812  case SavedRValue::AggregateLiteral:
813    return RValue::getAggregate(RV.getValue());
814  case SavedRValue::AggregateAddress:
815    return RValue::getAggregate(CGF.Builder.CreateLoad(RV.getValue()));
816  case SavedRValue::Complex:
817    return RValue::getComplex(CGF.LoadComplexFromAddr(RV.getValue(), false));
818  }
819
820  llvm_unreachable("bad saved r-value kind");
821  return RValue();
822}
823
namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  ///
  /// The already-evaluated placement arguments are kept in trailing
  /// storage allocated immediately after this object by
  /// EHScopeStack::pushCleanupWithExtra (sized via getExtraSize).
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    // The 'operator delete' matching the 'operator new' of the
    // new-expression.
    const FunctionDecl *OperatorDelete;
    // The pointer returned by 'operator new'.
    llvm::Value *Ptr;
    // The size value that was passed to 'operator new'.
    llvm::Value *AllocSize;

    // The trailing RValue array lives directly after this object;
    // 'this+1' is the first byte past the cleanup itself.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Number of extra trailing bytes the EH stack must allocate for
    /// a cleanup with this many placement arguments.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store the I'th placement argument into the trailing storage.
    /// Must be called for every index before the cleanup can fire.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Either (void*, placement-args...) or the sized form
      // (void*, size_t) with no placement arguments.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  ///
  /// Identical in structure to CallDeleteDuringNew, except that every
  /// value is held as a SavedRValue (spilled by SaveRValue) because
  /// the originals may not dominate the cleanup's emission point.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    // Saved forms of the allocated pointer and allocation size.
    SavedRValue Ptr;
    SavedRValue AllocSize;

    // Trailing array of saved placement arguments, directly after
    // this object (see getExtraSize).
    SavedRValue *getPlacementArgs() {
      return reinterpret_cast<SavedRValue*>(this+1);
    }

  public:
    /// Number of extra trailing bytes for the saved placement args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(SavedRValue);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   SavedRValue Ptr,
                                   SavedRValue AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store the I'th saved placement argument into trailing storage.
    void setPlacementArg(unsigned I, SavedRValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Same signature constraint as the unconditional cleanup.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*; restore the spilled
      // pointer at the current insertion point.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RestoreRValue(CGF, Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = RestoreRValue(CGF, AllocSize);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = RestoreRValue(CGF, getPlacementArgs()[I]);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}
940
941/// Enter a cleanup to call 'operator delete' if the initializer in a
942/// new-expression throws.
943static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
944                                  const CXXNewExpr *E,
945                                  llvm::Value *NewPtr,
946                                  llvm::Value *AllocSize,
947                                  const CallArgList &NewArgs) {
948  // If we're not inside a conditional branch, then the cleanup will
949  // dominate and we can do the easier (and more efficient) thing.
950  if (!CGF.isInConditionalBranch()) {
951    CallDeleteDuringNew *Cleanup = CGF.EHStack
952      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
953                                                 E->getNumPlacementArgs(),
954                                                 E->getOperatorDelete(),
955                                                 NewPtr, AllocSize);
956    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
957      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
958
959    return;
960  }
961
962  // Otherwise, we need to save all this stuff.
963  SavedRValue SavedNewPtr = SaveRValue(CGF, RValue::get(NewPtr));
964  SavedRValue SavedAllocSize = SaveRValue(CGF, RValue::get(AllocSize));
965
966  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
967    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
968                                                 E->getNumPlacementArgs(),
969                                                 E->getOperatorDelete(),
970                                                 SavedNewPtr,
971                                                 SavedAllocSize);
972  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
973    Cleanup->setPlacementArg(I, SaveRValue(CGF, NewArgs[I+1].first));
974
975  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
976}
977
978llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
979  QualType AllocType = E->getAllocatedType();
980  if (AllocType->isArrayType())
981    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
982      AllocType = AType->getElementType();
983
984  FunctionDecl *NewFD = E->getOperatorNew();
985  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
986
987  CallArgList NewArgs;
988
989  // The allocation size is the first argument.
990  QualType SizeTy = getContext().getSizeType();
991
992  llvm::Value *NumElements = 0;
993  llvm::Value *AllocSizeWithoutCookie = 0;
994  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
995                                               *this, E, NumElements,
996                                               AllocSizeWithoutCookie);
997
998  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
999
1000  // Emit the rest of the arguments.
1001  // FIXME: Ideally, this should just use EmitCallArgs.
1002  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
1003
1004  // First, use the types from the function type.
1005  // We start at 1 here because the first argument (the allocation size)
1006  // has already been emitted.
1007  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
1008    QualType ArgType = NewFTy->getArgType(i);
1009
1010    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
1011           getTypePtr() ==
1012           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
1013           "type mismatch in call argument!");
1014
1015    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
1016                                     ArgType));
1017
1018  }
1019
1020  // Either we've emitted all the call args, or we have a call to a
1021  // variadic function.
1022  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
1023         "Extra arguments in non-variadic function!");
1024
1025  // If we still have any arguments, emit them using the type of the argument.
1026  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
1027       NewArg != NewArgEnd; ++NewArg) {
1028    QualType ArgType = NewArg->getType();
1029    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
1030                                     ArgType));
1031  }
1032
1033  // Emit the call to new.
1034  RValue RV =
1035    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
1036             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
1037
1038  // If an allocation function is declared with an empty exception specification
1039  // it returns null to indicate failure to allocate storage. [expr.new]p13.
1040  // (We don't need to check for null when there's no new initializer and
1041  // we're allocating a POD type).
1042  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
1043    !(AllocType->isPODType() && !E->hasInitializer());
1044
1045  llvm::BasicBlock *NullCheckSource = 0;
1046  llvm::BasicBlock *NewNotNull = 0;
1047  llvm::BasicBlock *NewEnd = 0;
1048
1049  llvm::Value *NewPtr = RV.getScalarVal();
1050  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
1051
1052  if (NullCheckResult) {
1053    NullCheckSource = Builder.GetInsertBlock();
1054    NewNotNull = createBasicBlock("new.notnull");
1055    NewEnd = createBasicBlock("new.end");
1056
1057    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
1058    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
1059    EmitBlock(NewNotNull);
1060  }
1061
1062  assert((AllocSize == AllocSizeWithoutCookie) ==
1063         CalculateCookiePadding(*this, E).isZero());
1064  if (AllocSize != AllocSizeWithoutCookie) {
1065    assert(E->isArray());
1066    NewPtr = CGM.getCXXABI().InitializeArrayCookie(CGF, NewPtr, NumElements,
1067                                                   AllocType);
1068  }
1069
1070  // If there's an operator delete, enter a cleanup to call it if an
1071  // exception is thrown.
1072  EHScopeStack::stable_iterator CallOperatorDelete;
1073  if (E->getOperatorDelete()) {
1074    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
1075    CallOperatorDelete = EHStack.stable_begin();
1076  }
1077
1078  const llvm::Type *ElementPtrTy
1079    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
1080  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
1081
1082  if (E->isArray()) {
1083    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1084
1085    // NewPtr is a pointer to the base element type.  If we're
1086    // allocating an array of arrays, we'll need to cast back to the
1087    // array pointer type.
1088    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
1089    if (NewPtr->getType() != ResultTy)
1090      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
1091  } else {
1092    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
1093  }
1094
1095  // Deactivate the 'operator delete' cleanup if we finished
1096  // initialization.
1097  if (CallOperatorDelete.isValid())
1098    DeactivateCleanupBlock(CallOperatorDelete);
1099
1100  if (NullCheckResult) {
1101    Builder.CreateBr(NewEnd);
1102    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
1103    EmitBlock(NewEnd);
1104
1105    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
1106    PHI->reserveOperandSpace(2);
1107    PHI->addIncoming(NewPtr, NotNullSource);
1108    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
1109                     NullCheckSource);
1110
1111    NewPtr = PHI;
1112  }
1113
1114  return NewPtr;
1115}
1116
1117void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1118                                     llvm::Value *Ptr,
1119                                     QualType DeleteTy) {
1120  assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1121
1122  const FunctionProtoType *DeleteFTy =
1123    DeleteFD->getType()->getAs<FunctionProtoType>();
1124
1125  CallArgList DeleteArgs;
1126
1127  // Check if we need to pass the size to the delete operator.
1128  llvm::Value *Size = 0;
1129  QualType SizeTy;
1130  if (DeleteFTy->getNumArgs() == 2) {
1131    SizeTy = DeleteFTy->getArgType(1);
1132    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1133    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1134                                  DeleteTypeSize.getQuantity());
1135  }
1136
1137  QualType ArgTy = DeleteFTy->getArgType(0);
1138  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1139  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1140
1141  if (Size)
1142    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1143
1144  // Emit the call to delete.
1145  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1146           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1147           DeleteArgs, DeleteFD);
1148}
1149
1150namespace {
1151  /// Calls the given 'operator delete' on a single object.
1152  struct CallObjectDelete : EHScopeStack::Cleanup {
1153    llvm::Value *Ptr;
1154    const FunctionDecl *OperatorDelete;
1155    QualType ElementType;
1156
1157    CallObjectDelete(llvm::Value *Ptr,
1158                     const FunctionDecl *OperatorDelete,
1159                     QualType ElementType)
1160      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1161
1162    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1163      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1164    }
1165  };
1166}
1167
/// Emit the code for deleting a single object: run its destructor
/// (virtually if necessary) and call 'operator delete', even if the
/// destructor throws.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        // Virtual dispatch to the *deleting* destructor, which both
        // destroys the object and calls the appropriate 'operator
        // delete' itself (so we must not call delete here).
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // The cleanup is pushed before the destructor call so that it is
  // active while the destructor runs.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  // Non-virtual case: invoke the complete destructor directly,
  // if the type has a non-trivial one.
  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Pop the cleanup, emitting the 'operator delete' call on the
  // normal path as well.
  CGF.PopCleanupBlock();
}
1208
1209namespace {
1210  /// Calls the given 'operator delete' on an array of objects.
1211  struct CallArrayDelete : EHScopeStack::Cleanup {
1212    llvm::Value *Ptr;
1213    const FunctionDecl *OperatorDelete;
1214    llvm::Value *NumElements;
1215    QualType ElementType;
1216    CharUnits CookieSize;
1217
1218    CallArrayDelete(llvm::Value *Ptr,
1219                    const FunctionDecl *OperatorDelete,
1220                    llvm::Value *NumElements,
1221                    QualType ElementType,
1222                    CharUnits CookieSize)
1223      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1224        ElementType(ElementType), CookieSize(CookieSize) {}
1225
1226    void Emit(CodeGenFunction &CGF, bool IsForEH) {
1227      const FunctionProtoType *DeleteFTy =
1228        OperatorDelete->getType()->getAs<FunctionProtoType>();
1229      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1230
1231      CallArgList Args;
1232
1233      // Pass the pointer as the first argument.
1234      QualType VoidPtrTy = DeleteFTy->getArgType(0);
1235      llvm::Value *DeletePtr
1236        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1237      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1238
1239      // Pass the original requested size as the second argument.
1240      if (DeleteFTy->getNumArgs() == 2) {
1241        QualType size_t = DeleteFTy->getArgType(1);
1242        const llvm::IntegerType *SizeTy
1243          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1244
1245        CharUnits ElementTypeSize =
1246          CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1247
1248        // The size of an element, multiplied by the number of elements.
1249        llvm::Value *Size
1250          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1251        Size = CGF.Builder.CreateMul(Size, NumElements);
1252
1253        // Plus the size of the cookie if applicable.
1254        if (!CookieSize.isZero()) {
1255          llvm::Value *CookieSizeV
1256            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1257          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1258        }
1259
1260        Args.push_back(std::make_pair(RValue::get(Size), size_t));
1261      }
1262
1263      // Emit the call to delete.
1264      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1265                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
1266                   ReturnValueSlot(), Args, OperatorDelete);
1267    }
1268  };
1269}
1270
1271/// Emit the code for deleting an array of objects.
1272static void EmitArrayDelete(CodeGenFunction &CGF,
1273                            const FunctionDecl *OperatorDelete,
1274                            llvm::Value *Ptr,
1275                            QualType ElementType) {
1276  llvm::Value *NumElements = 0;
1277  llvm::Value *AllocatedPtr = 0;
1278  CharUnits CookieSize;
1279  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
1280                                      NumElements, AllocatedPtr, CookieSize);
1281
1282  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1283
1284  // Make sure that we call delete even if one of the dtors throws.
1285  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1286                                           AllocatedPtr, OperatorDelete,
1287                                           NumElements, ElementType,
1288                                           CookieSize);
1289
1290  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1291    if (!RD->hasTrivialDestructor()) {
1292      assert(NumElements && "ReadArrayCookie didn't find element count"
1293                            " for a class with destructor");
1294      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1295    }
1296  }
1297
1298  CGF.PopCleanupBlock();
1299}
1300
/// Emit a delete-expression: null-check the operand, then dispatch
/// to the array or single-object deletion path.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    // Only peel off implicit conversions *to void pointer*; stop at
    // user-defined conversions, which are real operations.
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer does nothing, so
  // branch straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // At this point Ptr must point at exactly the element type.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1361
/// Emit a typeid expression, producing a pointer to the std::type_info
/// object.  For polymorphic glvalue operands the type_info is read
/// from the object's vtable; otherwise it is a static RTTI descriptor.
llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(type): always a static descriptor, no evaluation needed.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  // typeid(expr): strip qualifiers/references off the operand's type.
  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // Polymorphic type: the dynamic type_info must be read from the
      // object's vtable, so the operand has to be evaluated.
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        // typeid(*null) must throw std::bad_typeid, via the runtime.
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      // In the Itanium ABI, the slot just before the vtable's address
      // point holds the type_info pointer: load vptr, index -1, load.
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic operand: the static type's RTTI descriptor.
  // (The operand is not evaluated in this case.)
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1413
/// Emit a dynamic_cast of the value V.  Pointer casts may produce
/// null (and must null-check the operand); reference casts throw
/// std::bad_cast on failure; casts to void* just adjust to the most
/// derived object using the vtable's offset-to-top slot.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  // Classify the cast: pointer casts accept (and propagate) null;
  // reference casts must throw on failure instead.
  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    // Reference cast: operate on the referent's address.
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  // Strip down to the unqualified class types for the RTTI lookups.
  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  // Null-check the operand if necessary; null input produces null
  // output (merged with a PHI at the end).
  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*)
  if (ToVoid) {
    // dynamic_cast to void*: adjust to the most derived object by
    // loading the offset-to-top slot (index -2 from the vtable's
    // address point, per the Itanium ABI) and applying it.
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast
    // Runtime signature: void *__dynamic_cast(const void *sub,
    //   const abi::__class_type_info *src, *dst, ptrdiff_t hint).
    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *PtrToInt8Ty
      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    // -1 means "unknown relationship"; the runtime does a full search.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, PtrToInt8Ty);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    if (ThrowOnBad) {
      // Reference cast: a null result means failure, so call (or
      // invoke, if there's an EH landing pad) __cxa_bad_cast.
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(VMContext);
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  // Merge the null and non-null paths: a null input yields a null
  // result of the destination type.
  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1534