CGExprCXX.cpp revision 4c40d98ab7acf5f27fa89b17bd8fc0ef7683df37
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
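  // (The VTT is the table of vtable pointers that constructors and
  // destructors of classes with virtual bases receive as an extra, implicit
  // argument in the Itanium C++ ABI.)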
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
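///
/// For example, given 'struct A { virtual void f(); };':
///   A a;  a.f();   // 'a' is a variable of record type: direct call to A::f.
///   A().f();       // call on a temporary: direct call.
///   A *p; p->f();  // dynamic type unknown: remains a virtual call.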
static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable has record type, its dynamic type is known statically
      // and the call can be devirtualized.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Value *This;

  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else {
    LValue BaseLV = EmitLValue(ME->getBase());
    This = BaseLV.getAddress();
  }

  if (MD->isCopyAssignment() && MD->isTrivial()) {
    // We don't like to generate the trivial copy assignment operator when
    // it isn't necessary; just produce the proper effect here.
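    // For a class with a trivial copy assignment operator, 'a = b' just
    // copies the object representation, so emit an aggregate copy instead of
    // a call.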
    llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
    EmitAggregateCopy(This, RHS, CE->getType());
    return RValue::get(This);
  }

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
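  //
  // For example, 'x.Base::f()' is always emitted as a direct call to Base::f,
  // even when f is virtual.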
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Destructor
             = dyn_cast<CXXDestructorDecl>(MD)) {
    if (Destructor->isTrivial())
      return RValue::get(0);
    if (MD->isVirtual() && !ME->hasQualifier() &&
        !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
      Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty);
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
    }
  } else if (MD->isVirtual() && !ME->hasQualifier() &&
             !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  if (MD->isCopyAssignment()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      LValue LV = EmitLValue(E->getArg(0));
      llvm::Value *This;
      if (LV.isPropertyRef()) {
        llvm::Value *AggLoc  = CreateMemTemp(E->getArg(1)->getType());
        EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
        EmitObjCPropertySet(LV.getPropertyRefExpr(),
                            RValue::getAggregate(AggLoc, false /*VolatileDest*/));
        return RValue::getAggregate(0, false);
      }
      else
        This = LV.getAddress();

      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This;
  if (LV.isPropertyRef()) {
    RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getArg(0)->getType());
    assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
    This = RV.getAggregateAddr();
  }
  else
    This = LV.getAddress();

  llvm::Value *Callee;
  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
                                      const CXXConstructExpr *E) {
  assert(Dest && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());


  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Code gen optimization to eliminate copy constructor and return
  // its first argument instead, if in fact that argument is a temporary
  // object.
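  // For example, in 'A a = A(1);' the temporary A(1) is constructed directly
  // into 'a' and the copy constructor call is elided.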
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
      EmitAggExpr(Arg, Dest, false);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest, BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
                           E->arg_begin(), E->arg_end());
  }
}

static CharUnits CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
  ElementType = Ctx.getBaseElementType(ElementType);
  const RecordType *RT = ElementType->getAs<RecordType>();
  if (!RT)
    return CharUnits::Zero();

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CharUnits::Zero();

  // Check if the class has a trivial destructor.
  if (RD->hasTrivialDestructor()) {
    // Check if the usual deallocation function takes two arguments.
    const CXXMethodDecl *UsualDeallocationFunction = 0;

    DeclarationName OpName =
      Ctx.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
    DeclContext::lookup_const_iterator Op, OpEnd;
    for (llvm::tie(Op, OpEnd) = RD->lookup(OpName);
         Op != OpEnd; ++Op) {
      const CXXMethodDecl *Delete = cast<CXXMethodDecl>(*Op);

      if (Delete->isUsualDeallocationFunction()) {
        UsualDeallocationFunction = Delete;
        break;
      }
    }

    // No usual deallocation function, we don't need a cookie.
    if (!UsualDeallocationFunction)
      return CharUnits::Zero();

    // The usual deallocation function doesn't take a size_t argument, so we
    // don't need a cookie.
    if (UsualDeallocationFunction->getNumParams() == 1)
      return CharUnits::Zero();

    assert(UsualDeallocationFunction->getNumParams() == 2 &&
           "Unexpected deallocation function type!");
  }

  // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
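  // (e.g. with an 8-byte size_t and an 8-byte-aligned element type, 8 bytes
  // are reserved in front of the array, and the element count lives in the
  // size_t slot immediately preceding the first element).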
  return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
                  Ctx.getTypeAlignInChars(ElementType));
}

/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
                                        const FunctionDecl *Fn) {
  // Must be in global scope.  Note that allocation functions can't be
  // declared in namespaces.
  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
    return false;

  // Signature must be void *operator new[](size_t, void*).
  // The size_t is common to all operator new[]s.
  if (Fn->getNumParams() != 2)
    return false;

  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
  return (ParamType == Ctx.VoidPtrTy);
}

static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the new operator being used is
  // ::operator new[](size_t, void*).
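  // (the placement array form, e.g. 'new (buf) T[n]').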
  const FunctionDecl *OperatorNew = E->getOperatorNew();
  if (IsPlacementOperatorNewArray(Ctx, OperatorNew))
    return CharUnits::Zero();

  return CalculateCookiePadding(Ctx, E->getAllocatedType());
}

static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  if (!E->isArray()) {
    CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Emit the array size expression.
  // We multiply the sizes of all array dimensions to compute NumElements;
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    llvm::Value *ArraySize
        = llvm::ConstantInt::get(CGF.CGM.getLLVMContext(), CAT->getSize());
    NumElements = CGF.Builder.CreateMul(NumElements, ArraySize);
  }

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
  const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
  llvm::Value *Size = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    // Determine if there is an overflow here by doing an extended multiply.
    llvm::APInt NEC = NumElementsC->getValue();
    NEC.zext(NEC.getBitWidth()*2);

    llvm::APInt SC = cast<llvm::ConstantInt>(Size)->getValue();
    SC.zext(SC.getBitWidth()*2);
    SC *= NEC;

    if (SC.countLeadingZeros() >= NumElementsC->getValue().getBitWidth()) {
      SC.trunc(NumElementsC->getValue().getBitWidth());
      Size = llvm::ConstantInt::get(Size->getContext(), SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(Size->getType());
    }

  } else {
    // Multiply with the type size.  This multiply can overflow, e.g. in:
    //   new double[n]
    // where n is 2^30 on a 32-bit machine or 2^62 on a 64-bit machine.  Because
    // of this, we need to detect the overflow and ensure that an exception is
    // thrown by forcing the size to -1 on overflow.
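    // For example, with n == 2^30 on a 32-bit target, n * sizeof(double) is
    // 2^33, which wraps to 0 in 32 bits; forcing the size to -1 (SIZE_MAX)
    // instead guarantees that operator new fails.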
    llvm::Value *UMulF =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, &SizeTy, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumElements, Size);
    // Branch on the overflow bit to the overflow block, which is lazily
    // created.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
    // Get the normal result of the multiplication.
    llvm::Value *V = CGF.Builder.CreateExtractValue(MulRes, 0);

    llvm::BasicBlock *NormalBB = CGF.createBasicBlock("no_overflow");
    llvm::BasicBlock *OverflowBB = CGF.createBasicBlock("overflow");

    CGF.Builder.CreateCondBr(DidOverflow, OverflowBB, NormalBB);

    llvm::BasicBlock *PrevBB = CGF.Builder.GetInsertBlock();

    // We just need the overflow block to build a PHI node.
    CGF.EmitBlock(OverflowBB);
    CGF.EmitBlock(NormalBB);

    llvm::PHINode *PN = CGF.Builder.CreatePHI(V->getType());

    PN->addIncoming(V, PrevBB);
    PN->addIncoming(llvm::Constant::getAllOnesValue(V->getType()), OverflowBB);
    Size = PN;
  }
  SizeWithoutCookie = Size;

  // Add the cookie padding if necessary.
  CharUnits CookiePadding = CalculateCookiePadding(CGF.getContext(), E);
  if (!CookiePadding.isZero())
    Size = CGF.Builder.CreateAdd(Size,
        llvm::ConstantInt::get(SizeTy, CookiePadding.getQuantity()));

  return Size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else
    CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall through to the loop
  // body; otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (NewPtr->getType() != BP)
    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");

  CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                          Size,
                    llvm::ConstantInt::get(CGF.Int32Ty,
                                           CGF.getContext().getTypeAlign(T)/8),
                          llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                                 0));
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception
  // specification, it returns null to indicate failure to allocate storage
  // ([expr.new]p13).
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NewNull = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();

  if (NullCheckResult) {
    NewNull = createBasicBlock("new.null");
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull =
      Builder.CreateICmpEQ(NewPtr,
                           llvm::Constant::getNullValue(NewPtr->getType()),
                           "isnull");

    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
    EmitBlock(NewNotNull);
  }

  CharUnits CookiePadding = CalculateCookiePadding(getContext(), E);
  if (!CookiePadding.isZero()) {
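    // Cookie layout: [alignment padding][element count (size_t)][array data].
    // Store the element count so that the array form of operator delete can
    // recover it later (see GetAllocatedObjectPtrAndNumElements).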
    CharUnits CookieOffset =
      CookiePadding - getContext().getTypeSizeInChars(SizeTy);

    llvm::Value *NumElementsPtr =
      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset.getQuantity());

    NumElementsPtr = Builder.CreateBitCast(NumElementsPtr,
                                           ConvertType(SizeTy)->getPointerTo());
    Builder.CreateStore(NumElements, NumElementsPtr);

    // Now add the padding to the new ptr.
    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookiePadding.getQuantity());
  }

  if (AllocType->isArrayType()) {
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();
    NewPtr =
      Builder.CreateBitCast(NewPtr,
                          ConvertType(getContext().getPointerType(AllocType)));
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
    NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
  }
  else {
    NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
  }

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    NewNotNull = Builder.GetInsertBlock();
    EmitBlock(NewNull);
    Builder.CreateBr(NewEnd);
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NewNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);

    NewPtr = PHI;
  }

  return NewPtr;
}

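/// Given a pointer to the first element of an array that was allocated with a
/// cookie, recover both the pointer originally returned by operator new[] and
/// the element count stored in the cookie.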
static std::pair<llvm::Value *, llvm::Value *>
GetAllocatedObjectPtrAndNumElements(CodeGenFunction &CGF,
                                    llvm::Value *Ptr, QualType DeleteTy) {
  QualType SizeTy = CGF.getContext().getSizeType();
  const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);

  CharUnits DeleteTypeAlign = CGF.getContext().getTypeAlignInChars(DeleteTy);
  CharUnits CookiePadding =
    std::max(CGF.getContext().getTypeSizeInChars(SizeTy),
             DeleteTypeAlign);
  assert(!CookiePadding.isZero() && "CookiePadding should not be 0.");

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  CharUnits CookieOffset =
    CookiePadding - CGF.getContext().getTypeSizeInChars(SizeTy);

  llvm::Value *AllocatedObjectPtr = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
  AllocatedObjectPtr =
    CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
                                           -CookiePadding.getQuantity());

  llvm::Value *NumElementsPtr =
    CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
                                           CookieOffset.getQuantity());
  NumElementsPtr =
    CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo());

  llvm::Value *NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
  NumElements =
    CGF.Builder.CreateIntCast(NumElements, SizeLTy, /*isSigned=*/false);

  return std::make_pair(AllocatedObjectPtr, NumElements);
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  if (DeleteFD->getOverloadedOperator() == OO_Array_Delete &&
      !CalculateCookiePadding(getContext(), DeleteTy).isZero()) {
    // We need to get the number of elements in the array from the cookie.
    llvm::Value *AllocatedObjectPtr;
    llvm::Value *NumElements;
    llvm::tie(AllocatedObjectPtr, NumElements) =
      GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);

    // Multiply the size with the number of elements.
    if (Size)
      Size = Builder.CreateMul(NumElements, Size);

    Ptr = AllocatedObjectPtr;
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));

  if (Size)
    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  bool ShouldCallDelete = true;

  // Call the destructor if necessary.
  if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!RD->hasTrivialDestructor()) {
        const CXXDestructorDecl *Dtor = RD->getDestructor();
        if (E->isArrayForm()) {
          llvm::Value *AllocatedObjectPtr;
          llvm::Value *NumElements;
          llvm::tie(AllocatedObjectPtr, NumElements) =
            GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);

          EmitCXXAggrDestructorCall(Dtor, NumElements, Ptr);
        } else if (Dtor->isVirtual()) {
          const llvm::Type *Ty =
            CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(Dtor),
                                           /*isVariadic=*/false);

          llvm::Value *Callee = BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
          EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                            0, 0);

          // The dtor took care of deleting the object.
          ShouldCallDelete = false;
        } else
          EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                                Ptr);
      }
    }
  }

  if (ShouldCallDelete)
    EmitDeleteCall(E->getOperatorDelete(), Ptr, DeleteTy);

  EmitBlock(DeleteEnd);
}

llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      LTy = LTy->getPointerTo()->getPointerTo();
      llvm::Value *V = Builder.CreateBitCast(This, LTy);
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
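      // In the Itanium C++ ABI the std::type_info pointer for the dynamic
      // type is stored one slot before the vtable's address point, so load
      // the vtable pointer and index it at -1.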
      V = Builder.CreateLoad(V, "vtable");
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast to void*.
  if (ToVoid) {
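    // A dynamic_cast to void* just adjusts the pointer to the start of the
    // most-derived object: read the 'offset to top' field stored two slots
    // before the vtable's address point (Itanium C++ ABI) and add it to the
    // object pointer.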
    llvm::Value *This = V;
    V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
    V = Builder.CreateLoad(V, "vtable");
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast
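    /// In the Itanium C++ ABI runtime this has the signature
    ///   void *__dynamic_cast(const void *sub, const __class_type_info *src,
    ///                        const __class_type_info *dst,
    ///                        ptrdiff_t src2dst_offset);
    /// where a src2dst_offset of -1 means the offset hint is unknown.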
    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *PtrToInt8Ty
      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, PtrToInt8Ty);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(VMContext);
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1078