//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

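// For illustration: given
//   struct A { int a; };
//   struct B : A { int b; };
//   struct C : B { int c; };
// a cast path C -> B -> A accumulates B's offset within C's layout plus
// A's offset within B's layout. With single inheritance these are all
// zero; with multiple inheritance the intermediate offsets are generally
// nonzero.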
llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return nullptr;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the base (virtual or non-virtual).
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

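// For illustration: with
//   struct A { virtual ~A(); };
//   struct B : virtual A { };
//   struct C : B { };
// converting a C* to an A* requires a virtual step. Per the
// canonicalization described below, the conversion path begins with the
// virtual base A itself, and any remaining entries describe a purely
// non-virtual walk within that subobject.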
llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = nullptr;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = nullptr; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = nullptr;
  llvm::BasicBlock *endBB = nullptr;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = nullptr;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

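// For illustration: the inverse conversion,
//   static_cast<C *>(bPtr)
// from a base class B back down to a derived class C, subtracts the same
// non-virtual offset that the derived-to-base conversion adds. No virtual
// steps can appear here: the language disallows static_cast from a
// virtual base to a derived class.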
llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = nullptr;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

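// For illustration: in the Itanium C++ ABI, a class with virtual bases
// such as
//   struct A { }; struct B : virtual A { }; struct C : B { };
// has a VTT ("virtual table table"): an array of vtable pointers. C's
// complete-object constructor passes a slice of C's VTT to B's
// base-object constructor so that B installs vtables appropriate for a
// B-within-C, not for a standalone B.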
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return nullptr;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

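// For illustration: a member array whose elements need non-trivial
// initialization, e.g. in the implicitly-defined copy constructor of
//   struct S { std::string Names[4][2]; };
// is initialized by nested loops, one per array dimension. The function
// below emits one loop level per call and recurses for the next
// dimension, emitting the element initializer in the innermost body.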
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
                              CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

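// For illustration: in a defaulted copy or move constructor, an array
// member of POD type, e.g.
//   struct S { int Data[64]; S(const S &) = default; };
// is copied with a single aggregate copy rather than by walking the
// element-wise initialization in the AST; see the special case below.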
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    for (const auto *I : IndirectField->chain())
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = nullptr;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

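// For illustration: under the complete-to-base delegation optimization,
// the complete-object constructor of a class with no virtual bases can
// be emitted as a single call to the base-object constructor, forwarding
// the same arguments, instead of duplicating the entire constructor body.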
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation. Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
        : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;
    const SanitizerOptions *OldSanOpts;
  };
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
        LastFieldOffset(0), LastAddedFieldIndex(0) {}

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (!FirstField)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. it hasn't aggregated enough data).
      if (!FirstField) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = nullptr;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

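  // For illustration: for a defaulted copy constructor of
  //   struct S { int a; int b; float c; };
  // the three consecutive trivially-copyable members are aggregated by
  // the machinery above into a single memcpy covering a through c,
  // instead of three separate field-by-field copies.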
  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return nullptr;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructable members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        FieldDecl *Field = AggregatedInits[i]->getMember();
        QualType FieldType = Field->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (!CGF.needsEHCleanup(dtorKind))
          continue;

        // Push the cleanup on the field's own address, not on the start of
        // the enclosing object.
        LValue FieldLHS = CGF.EmitLValueForField(LHS, Field);
        CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return nullptr;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return nullptr;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                       MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return nullptr;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return nullptr;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return nullptr;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return nullptr;
        return Field;
      }

      return nullptr;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        if (!AggregatedStmts.empty()) {
          CopyingValueRepresentation CVR(CGF);
          CGF.EmitStmt(AggregatedStmts[0]);
        }
        // reset() clears FirstField, so the emitMemcpy() below is a no-op
        // on this path.
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

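// For illustration: for
//   struct D : virtual V, B { M m; D(); };
// the prologue runs initializers in this order: V (virtual bases; in the
// complete-object variant only), then B (direct non-virtual bases), then
// the vtable pointers are installed, and finally m (non-static data
// members).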
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = nullptr;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (const auto *Field : BaseClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  // Check non-virtual bases.
  for (const auto &I : BaseClassDecl->bases()) {
    if (I.isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (const auto &I : BaseClassDecl->vbases()) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (const auto *Field : ClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  return true;
}

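// For illustration: if the destructor body is trivial and every field
// (transitively) has a trivial destructor body, nothing in the destructor
// can make a virtual call on 'this', so the vtable pointers need not be
// re-installed before running the epilogue cleanups.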
/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    RegionCounter Cnt = getPGORegionCounter(Body);
    Cnt.beginRegion(Builder);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (auto *I : RootCS->body())
    AM.emitAssignment(I);
  AM.finish();
}

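// For illustration: for 'delete p' on a class with a virtual destructor,
// the deleting-destructor variant (D0 in the Itanium ABI) runs the
// complete-object destructor and then calls the operator delete that
// Sema selected; the cleanups below emit that final call.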
1359namespace {
1360  /// Call the operator delete associated with the current destructor.
1361  struct CallDtorDelete : EHScopeStack::Cleanup {
1362    CallDtorDelete() {}
1363
1364    void Emit(CodeGenFunction &CGF, Flags flags) override {
1365      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1366      const CXXRecordDecl *ClassDecl = Dtor->getParent();
1367      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1368                         CGF.getContext().getTagDeclType(ClassDecl));
1369    }
1370  };
1371
1372  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
1373    llvm::Value *ShouldDeleteCondition;
1374  public:
1375    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
1376      : ShouldDeleteCondition(ShouldDeleteCondition) {
1377      assert(ShouldDeleteCondition != nullptr);
1378    }
1379
1380    void Emit(CodeGenFunction &CGF, Flags flags) override {
1381      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
1382      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
1383      llvm::Value *ShouldCallDelete
1384        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
1385      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);
1386
1387      CGF.EmitBlock(callDeleteBB);
1388      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1389      const CXXRecordDecl *ClassDecl = Dtor->getParent();
1390      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1391                         CGF.getContext().getTagDeclType(ClassDecl));
1392      CGF.Builder.CreateBr(continueBB);
1393
1394      CGF.EmitBlock(continueBB);
1395    }
1396  };
1397
1398  class DestroyField  : public EHScopeStack::Cleanup {
1399    const FieldDecl *field;
1400    CodeGenFunction::Destroyer *destroyer;
1401    bool useEHCleanupForArray;
1402
1403  public:
1404    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
1405                 bool useEHCleanupForArray)
1406      : field(field), destroyer(destroyer),
1407        useEHCleanupForArray(useEHCleanupForArray) {}
1408
1409    void Emit(CodeGenFunction &CGF, Flags flags) override {
1410      // Find the address of the field.
1411      llvm::Value *thisValue = CGF.LoadCXXThis();
1412      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
1413      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
1414      LValue LV = CGF.EmitLValueForField(ThisLV, field);
1415      assert(LV.isSimple());
1416
1417      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
1418                      flags.isForNormalCleanup() && useEHCleanupForArray);
1419    }
1420  };
1421}

/// \brief Emit all code that comes at the end of a class's destructor.
/// This calls the destructors of members and base classes in the reverse
/// order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
         "Should not emit dtor epilogue for non-exported trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destroys all the virtual bases.
  if (DtorType == Dtor_Complete) {
    // We push the cleanups in forward order so that they'll be popped in
    // reverse order.
    for (const auto &Base : ClassDecl->vbases()) {
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  for (const auto *Field : ClassDecl->fields()) {
    QualType type = Field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}
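
// For illustration: given a hypothetical hierarchy
//
//   struct A { ~A(); };
//   struct V { ~V(); };
//   struct B { ~B(); };
//   struct C : A, virtual V { B b; ~C(); };
//
// the base-object destructor of C (D2 in the Itanium ABI) runs the
// user-written body, then the cleanups pushed above fire in reverse:
// first 'b' is destroyed, then the non-virtual base A. The complete-object
// destructor (D1) additionally destroys the virtual base V afterwards,
// mirroring the construction order exactly.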

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
    llvm::Value *arrayBegin, CallExpr::const_arg_iterator argBegin,
    CallExpr::const_arg_iterator argEnd, bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, llvm::Value *numElements,
    llvm::Value *arrayBegin, CallExpr::const_arg_iterator argBegin,
    CallExpr::const_arg_iterator argEnd, bool zeroInitialize) {

  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors are temporarily the loop block; the true successor is
    // patched to skip past the loop once the continuation block exists (see
    // the setSuccessor call below).
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
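
// For illustration, the loop emitted above has roughly this shape in IR
// (block names as created above; 'n' is the element count). This is a
// sketch, not exact output; types and operands depend on the target:
//
//     %isempty = icmp eq i64 %n, 0
//     br i1 %isempty, label %arrayctor.cont, label %new.ctorloop  ; patched
//   arrayctor.loop:
//     %cur = phi %T* [ %begin, %entry ], [ %next, %arrayctor.loop ]
//     call void @ctor(%T* %cur, ...)
//     %next = getelementptr inbounds %T* %cur, i64 1
//     %done = icmp eq %T* %next, %arrayctor.end
//     br i1 %done, label %arrayctor.cont, label %arrayctor.loop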

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  // Insert any ABI-specific implicit constructor arguments.
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);

  // Emit the call.
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}
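
// For illustration: with a hypothetical trivially copyable type
//
//   struct P { int x, y; };
//   P b(a);   // trivial copy construction
//
// the trivial 1-arg branch above emits no constructor call at all; the
// copy is lowered directly to an aggregate copy of 'a' into 'b' via
// EmitAggregateCopy, which typically becomes a memcpy.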

void CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(
    const CXXConstructorDecl *D, llvm::Value *This, llvm::Value *Src,
    CallExpr::const_arg_iterator ArgBeg, CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over the first argument (Src), which we just pushed explicitly.
  EmitCallArgs(Args, FPT->isVariadic(), FPT->param_type_begin() + 1,
               FPT->param_type_end(), ArgBeg + 1, ArgEnd);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
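
// For illustration: when constructor variants are available (as in the
// Itanium C++ ABI) and delegation is valid, the complete-object
// constructor C1 can be emitted as a thin forwarding call to the
// base-object constructor C2, passing along 'this', the VTT if the callee
// needs one, and the user-visible arguments unchanged; that forwarded
// argument list is exactly what is assembled above.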

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void CodeGenFunction::EmitDelegatingCXXConstructorCall(
    const CXXConstructorDecl *Ctor, const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
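
// For illustration: for a C++11 delegating constructor such as
//
//   struct S { S(); S(int); };   // hypothetical example type
//   S::S() : S(42) {}
//
// the sole mem-initializer is the delegated call S(42), emitted above as
// an aggregate expression into 'this'. If exceptions are enabled and ~S
// is non-trivial, an EH-only cleanup is pushed so the matching destructor
// variant runs should the rest of S::S() throw after the delegated
// constructor has completed.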

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = nullptr;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the "virtual base offset offset" (the offset of the
    // virtual base's offset within the vtable) because the virtual base
    // might be at a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
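
// For illustration: in a hypothetical hierarchy
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { };
//
// a constructor of C stores two vtable pointers: one into the A-in-C
// subobject (at offset zero) and one into the B-in-C subobject, each
// pointing at the matching address point inside C's vtable group. The
// recursive walk below performs one such store per dynamic base,
// visiting each virtual base only once.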

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I.isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/nullptr,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
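
// For illustration: skipNoOpCastsAndParens reduces an expression such as
//
//   (__extension__ (b))     // 'b' a local variable
//
// to the bare DeclRefExpr 'b', so the devirtualization logic below can
// see through parentheses, no-op casts, and GNU __extension__ markers
// when inspecting the callee's base object.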

bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and we can therefore devirtualize calls to it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class declaring the member function is marked 'final',
  // it can't be derived from, and we can therefore devirtualize the member
  // function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // The base is a variable of record type (not a pointer or reference),
      // so its dynamic type is known exactly and we can devirtualize.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type by value.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}
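
// For illustration of the member-access case above:
//
//   struct Member { virtual void f(); };   // hypothetical example types
//   struct Holder { Member m; };
//   void g(Holder *h) { h->m.f(); }
//
// 'h->m' names a complete Member subobject whose dynamic type is known,
// so the call to f() can be emitted as a direct call.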

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(
    const CXXMethodDecl *callOperator, CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto param : BD->params())
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());

  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto Param : MD->params())
    EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());

  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static invoker should be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}
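
// For illustration: converting a captureless generic lambda to a function
// pointer,
//
//   auto id = [](auto x) { return x; };
//   int (*fp)(int) = id;
//
// yields a static invoker instantiated for 'int'. Its body, emitted above,
// forwards to the call operator specialization operator()<int> found via
// findSpecialization, with an undef 'this' since nothing is captured.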

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}