ItaniumCXXABI.cpp revision a53d7a0259ff88f78ba8ecac7d0cb3ea96302b1d
1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This provides C++ code generation targeting the Itanium C++ ABI. The class 11// in this file generates structures that follow the Itanium C++ ABI, which is 12// documented at: 13// http://www.codesourcery.com/public/cxx-abi/abi.html 14// http://www.codesourcery.com/public/cxx-abi/abi-eh.html 15// 16// It also supports the closely-related ARM ABI, documented at: 17// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf 18// 19//===----------------------------------------------------------------------===// 20 21#include "CGCXXABI.h" 22#include "CGRecordLayout.h" 23#include "CGVTables.h" 24#include "CodeGenFunction.h" 25#include "CodeGenModule.h" 26#include "clang/AST/Mangle.h" 27#include "clang/AST/Type.h" 28#include "llvm/IR/DataLayout.h" 29#include "llvm/IR/Intrinsics.h" 30#include "llvm/IR/Value.h" 31 32using namespace clang; 33using namespace CodeGen; 34 35namespace { 36class ItaniumCXXABI : public CodeGen::CGCXXABI { 37 /// VTables - All the vtables which have been defined. 38 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 39 40protected: 41 bool UseARMMethodPtrABI; 42 bool UseARMGuardVarABI; 43 44public: 45 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 46 bool UseARMMethodPtrABI = false, 47 bool UseARMGuardVarABI = false) : 48 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 49 UseARMGuardVarABI(UseARMGuardVarABI) { } 50 51 bool isReturnTypeIndirect(const CXXRecordDecl *RD) const { 52 // Structures with either a non-trivial destructor or a non-trivial 53 // copy constructor are always indirect. 54 return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor(); 55 } 56 57 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const { 58 // Structures with either a non-trivial destructor or a non-trivial 59 // copy constructor are always indirect. 
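// Illustrative note (the class names below are hypothetical, not from this file): given
//   struct Trivial    { int x; };
//   struct NonTrivial { ~NonTrivial(); int x; };
// a Trivial argument or return value can travel in registers, while a NonTrivial one is
// passed indirectly: the caller allocates a temporary and passes its address, and returns
// come back through a hidden pointer, so the object keeps a stable address for its
// non-trivial destructor or copy constructor.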
60 if (!RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) 61 return RAA_Indirect; 62 return RAA_Default; 63 } 64 65 bool isZeroInitializable(const MemberPointerType *MPT); 66 67 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT); 68 69 llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 70 llvm::Value *&This, 71 llvm::Value *MemFnPtr, 72 const MemberPointerType *MPT); 73 74 llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF, 75 llvm::Value *Base, 76 llvm::Value *MemPtr, 77 const MemberPointerType *MPT); 78 79 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 80 const CastExpr *E, 81 llvm::Value *Src); 82 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 83 llvm::Constant *Src); 84 85 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT); 86 87 llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD); 88 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 89 CharUnits offset); 90 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT); 91 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 92 CharUnits ThisAdjustment); 93 94 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 95 llvm::Value *L, 96 llvm::Value *R, 97 const MemberPointerType *MPT, 98 bool Inequality); 99 100 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 101 llvm::Value *Addr, 102 const MemberPointerType *MPT); 103 104 llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF, 105 llvm::Value *ptr, 106 QualType type); 107 108 llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF, 109 llvm::Value *This, 110 const CXXRecordDecl *ClassDecl, 111 const CXXRecordDecl *BaseClassDecl); 112 113 void BuildConstructorSignature(const CXXConstructorDecl *Ctor, 114 CXXCtorType T, 115 CanQualType &ResTy, 116 SmallVectorImpl<CanQualType> &ArgTys); 117 118 void EmitCXXConstructors(const CXXConstructorDecl *D); 119 120 void BuildDestructorSignature(const CXXDestructorDecl *Dtor, 121 CXXDtorType T, 122 CanQualType &ResTy, 123 SmallVectorImpl<CanQualType> &ArgTys); 124 125 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, 126 CXXDtorType DT) const { 127 // Itanium does not emit any destructor variant as an inline thunk. 128 // Delegating may occur as an optimization, but all variants are either 129 // emitted with external linkage or as linkonce if they are inline and used. 
130 return false; 131 } 132 133 void EmitCXXDestructors(const CXXDestructorDecl *D); 134 135 void BuildInstanceFunctionParams(CodeGenFunction &CGF, 136 QualType &ResTy, 137 FunctionArgList &Params); 138 139 void EmitInstanceFunctionProlog(CodeGenFunction &CGF); 140 141 void EmitConstructorCall(CodeGenFunction &CGF, 142 const CXXConstructorDecl *D, CXXCtorType Type, 143 bool ForVirtualBase, bool Delegating, 144 llvm::Value *This, 145 CallExpr::const_arg_iterator ArgBeg, 146 CallExpr::const_arg_iterator ArgEnd); 147 148 void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD); 149 150 llvm::Value *getVTableAddressPointInStructor( 151 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 152 BaseSubobject Base, const CXXRecordDecl *NearestVBase, 153 bool &NeedsVirtualOffset); 154 155 llvm::Constant * 156 getVTableAddressPointForConstExpr(BaseSubobject Base, 157 const CXXRecordDecl *VTableClass); 158 159 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 160 CharUnits VPtrOffset); 161 162 llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 163 llvm::Value *This, llvm::Type *Ty); 164 165 void EmitVirtualDestructorCall(CodeGenFunction &CGF, 166 const CXXDestructorDecl *Dtor, 167 CXXDtorType DtorType, SourceLocation CallLoc, 168 llvm::Value *This); 169 170 void emitVirtualInheritanceTables(const CXXRecordDecl *RD); 171 172 StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; } 173 StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; } 174 175 CharUnits getArrayCookieSizeImpl(QualType elementType); 176 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF, 177 llvm::Value *NewPtr, 178 llvm::Value *NumElements, 179 const CXXNewExpr *expr, 180 QualType ElementType); 181 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 182 llvm::Value *allocPtr, 183 CharUnits cookieSize); 184 185 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 186 llvm::GlobalVariable *DeclPtr, bool PerformInit); 187 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 188 llvm::Constant *dtor, llvm::Constant *addr); 189 190 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, 191 llvm::GlobalVariable *Var); 192 void EmitThreadLocalInitFuncs( 193 llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls, 194 llvm::Function *InitFunc); 195 LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF, 196 const DeclRefExpr *DRE); 197 198 bool NeedsVTTParameter(GlobalDecl GD); 199}; 200 201class ARMCXXABI : public ItaniumCXXABI { 202public: 203 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 204 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 205 /* UseARMGuardVarABI = */ true) {} 206 207 bool HasThisReturn(GlobalDecl GD) const { 208 return (isa<CXXConstructorDecl>(GD.getDecl()) || ( 209 isa<CXXDestructorDecl>(GD.getDecl()) && 210 GD.getDtorType() != Dtor_Deleting)); 211 } 212 213 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy); 214 215 CharUnits getArrayCookieSizeImpl(QualType elementType); 216 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF, 217 llvm::Value *NewPtr, 218 llvm::Value *NumElements, 219 const CXXNewExpr *expr, 220 QualType ElementType); 221 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr, 222 CharUnits cookieSize); 223}; 224} 225 226CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 227 switch (CGM.getTarget().getCXXABI().getKind()) { 228 // For IR-generation purposes, there's no significant difference 229 
// between the ARM and iOS ABIs. 230 case TargetCXXABI::GenericARM: 231 case TargetCXXABI::iOS: 232 return new ARMCXXABI(CGM); 233 234 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 235 // include the other 32-bit ARM oddities: constructor/destructor return values 236 // and array cookies. 237 case TargetCXXABI::GenericAArch64: 238 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 239 /* UseARMGuardVarABI = */ true); 240 241 case TargetCXXABI::GenericItanium: 242 if (CGM.getContext().getTargetInfo().getTriple().getArch() 243 == llvm::Triple::le32) { 244 // For PNaCl, use ARM-style method pointers so that PNaCl code 245 // does not assume anything about the alignment of function 246 // pointers. 247 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 248 /* UseARMGuardVarABI = */ false); 249 } 250 return new ItaniumCXXABI(CGM); 251 252 case TargetCXXABI::Microsoft: 253 llvm_unreachable("Microsoft ABI is not Itanium-based"); 254 } 255 llvm_unreachable("bad ABI kind"); 256} 257 258llvm::Type * 259ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 260 if (MPT->isMemberDataPointer()) 261 return CGM.PtrDiffTy; 262 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, NULL); 263} 264 265/// In the Itanium and ARM ABIs, method pointers have the form: 266/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 267/// 268/// In the Itanium ABI: 269/// - method pointers are virtual if (memptr.ptr & 1) is nonzero 270/// - the this-adjustment is (memptr.adj) 271/// - the virtual offset is (memptr.ptr - 1) 272/// 273/// In the ARM ABI: 274/// - method pointers are virtual if (memptr.adj & 1) is nonzero 275/// - the this-adjustment is (memptr.adj >> 1) 276/// - the virtual offset is (memptr.ptr) 277/// ARM uses 'adj' for the virtual flag because Thumb functions 278/// may be only single-byte aligned. 279/// 280/// If the member is virtual, the adjusted 'this' pointer points 281/// to a vtable pointer from which the virtual offset is applied. 282/// 283/// If the member is non-virtual, memptr.ptr is the address of 284/// the function to call. 285llvm::Value * 286ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 287 llvm::Value *&This, 288 llvm::Value *MemFnPtr, 289 const MemberPointerType *MPT) { 290 CGBuilderTy &Builder = CGF.Builder; 291 292 const FunctionProtoType *FPT = 293 MPT->getPointeeType()->getAs<FunctionProtoType>(); 294 const CXXRecordDecl *RD = 295 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); 296 297 llvm::FunctionType *FTy = 298 CGM.getTypes().GetFunctionType( 299 CGM.getTypes().arrangeCXXMethodType(RD, FPT)); 300 301 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 302 303 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 304 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 305 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 306 307 // Extract memptr.adj, which is in the second field. 308 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 309 310 // Compute the true adjustment. 311 llvm::Value *Adj = RawAdj; 312 if (UseARMMethodPtrABI) 313 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 314 315 // Apply the adjustment and cast back to the original struct type 316 // for consistency. 
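// Loose C-level sketch of the lowering this function performs (illustrative only;
// 'memptr' is the { ptr, adj } pair described above and 'fnptr' a plain function
// pointer type):
//   adj   = UseARMMethodPtrABI ? (memptr.adj >> 1) : memptr.adj;
//   this' = (char *)this + adj;
//   if (virtual bit)   // bit 0 of memptr.ptr (Itanium) or of memptr.adj (ARM)
//     fn = *(fnptr *)(*(char **)this' + memptr.ptr - (ARM ? 0 : 1));
//   else
//     fn = (fnptr)memptr.ptr;
// The basic blocks created above (memptr.virtual / memptr.nonvirtual / memptr.end)
// carry exactly this branch structure in the code that follows.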
317 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); 318 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); 319 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); 320 321 // Load the function pointer. 322 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 323 324 // If the LSB in the function pointer is 1, the function pointer points to 325 // a virtual function. 326 llvm::Value *IsVirtual; 327 if (UseARMMethodPtrABI) 328 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 329 else 330 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 331 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 332 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 333 334 // In the virtual path, the adjustment left 'This' pointing to the 335 // vtable of the correct base subobject. The "function pointer" is an 336 // offset within the vtable (+1 for the virtual flag on non-ARM). 337 CGF.EmitBlock(FnVirtual); 338 339 // Cast the adjusted this to a pointer to vtable pointer and load. 340 llvm::Type *VTableTy = Builder.getInt8PtrTy(); 341 llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo()); 342 VTable = Builder.CreateLoad(VTable, "memptr.vtable"); 343 344 // Apply the offset. 345 llvm::Value *VTableOffset = FnAsInt; 346 if (!UseARMMethodPtrABI) 347 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 348 VTable = Builder.CreateGEP(VTable, VTableOffset); 349 350 // Load the virtual function to call. 351 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo()); 352 llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn"); 353 CGF.EmitBranch(FnEnd); 354 355 // In the non-virtual path, the function pointer is actually a 356 // function pointer. 357 CGF.EmitBlock(FnNonVirtual); 358 llvm::Value *NonVirtualFn = 359 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); 360 361 // We're done. 362 CGF.EmitBlock(FnEnd); 363 llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2); 364 Callee->addIncoming(VirtualFn, FnVirtual); 365 Callee->addIncoming(NonVirtualFn, FnNonVirtual); 366 return Callee; 367} 368 369/// Compute an l-value by applying the given pointer-to-member to a 370/// base object. 371llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, 372 llvm::Value *Base, 373 llvm::Value *MemPtr, 374 const MemberPointerType *MPT) { 375 assert(MemPtr->getType() == CGM.PtrDiffTy); 376 377 CGBuilderTy &Builder = CGF.Builder; 378 379 unsigned AS = Base->getType()->getPointerAddressSpace(); 380 381 // Cast to char*. 382 Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS)); 383 384 // Apply the offset, which we assume is non-null. 385 llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset"); 386 387 // Cast the address to the appropriate pointer type, adopting the 388 // address space of the base pointer. 389 llvm::Type *PType 390 = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS); 391 return Builder.CreateBitCast(Addr, PType); 392} 393 394/// Perform a bitcast, derived-to-base, or base-to-derived member pointer 395/// conversion. 396/// 397/// Bitcast conversions are always a no-op under Itanium. 
398/// 399/// Obligatory offset/adjustment diagram: 400/// <-- offset --> <-- adjustment --> 401/// |--------------------------|----------------------|--------------------| 402/// ^Derived address point ^Base address point ^Member address point 403/// 404/// So when converting a base member pointer to a derived member pointer, 405/// we add the offset to the adjustment because the address point has 406/// decreased; and conversely, when converting a derived MP to a base MP 407/// we subtract the offset from the adjustment because the address point 408/// has increased. 409/// 410/// The standard forbids (at compile time) conversion to and from 411/// virtual bases, which is why we don't have to consider them here. 412/// 413/// The standard forbids (at run time) casting a derived MP to a base 414/// MP when the derived MP does not point to a member of the base. 415/// This is why -1 is a reasonable choice for null data member 416/// pointers. 417llvm::Value * 418ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, 419 const CastExpr *E, 420 llvm::Value *src) { 421 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 422 E->getCastKind() == CK_BaseToDerivedMemberPointer || 423 E->getCastKind() == CK_ReinterpretMemberPointer); 424 425 // Under Itanium, reinterprets don't require any additional processing. 426 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 427 428 // Use constant emission if we can. 429 if (isa<llvm::Constant>(src)) 430 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); 431 432 llvm::Constant *adj = getMemberPointerAdjustment(E); 433 if (!adj) return src; 434 435 CGBuilderTy &Builder = CGF.Builder; 436 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 437 438 const MemberPointerType *destTy = 439 E->getType()->castAs<MemberPointerType>(); 440 441 // For member data pointers, this is just a matter of adding the 442 // offset if the source is non-null. 443 if (destTy->isMemberDataPointer()) { 444 llvm::Value *dst; 445 if (isDerivedToBase) 446 dst = Builder.CreateNSWSub(src, adj, "adj"); 447 else 448 dst = Builder.CreateNSWAdd(src, adj, "adj"); 449 450 // Null check. 451 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType()); 452 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull"); 453 return Builder.CreateSelect(isNull, src, dst); 454 } 455 456 // The this-adjustment is left-shifted by 1 on ARM. 457 if (UseARMMethodPtrABI) { 458 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 459 offset <<= 1; 460 adj = llvm::ConstantInt::get(adj->getType(), offset); 461 } 462 463 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj"); 464 llvm::Value *dstAdj; 465 if (isDerivedToBase) 466 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj"); 467 else 468 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj"); 469 470 return Builder.CreateInsertValue(src, dstAdj, 1); 471} 472 473llvm::Constant * 474ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, 475 llvm::Constant *src) { 476 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 477 E->getCastKind() == CK_BaseToDerivedMemberPointer || 478 E->getCastKind() == CK_ReinterpretMemberPointer); 479 480 // Under Itanium, reinterprets don't require any additional processing. 481 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 482 483 // If the adjustment is trivial, we don't need to do anything. 
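// Rough worked example for the data-member-pointer case handled below, using
// hypothetical classes (not defined in this file):
//   struct A { int a; };
//   struct B { int b; };
//   struct D : A, B { };   // assume B lands at offset 4 inside D
// An 'int B::*' naming B::b is encoded as 0; converting it base-to-derived to
// 'int D::*' adds the base offset, giving 4, and converting back derived-to-base
// subtracts it again. The null value -1 is preserved by the explicit all-ones
// check below rather than being adjusted.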
484 llvm::Constant *adj = getMemberPointerAdjustment(E); 485 if (!adj) return src; 486 487 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 488 489 const MemberPointerType *destTy = 490 E->getType()->castAs<MemberPointerType>(); 491 492 // For member data pointers, this is just a matter of adding the 493 // offset if the source is non-null. 494 if (destTy->isMemberDataPointer()) { 495 // null maps to null. 496 if (src->isAllOnesValue()) return src; 497 498 if (isDerivedToBase) 499 return llvm::ConstantExpr::getNSWSub(src, adj); 500 else 501 return llvm::ConstantExpr::getNSWAdd(src, adj); 502 } 503 504 // The this-adjustment is left-shifted by 1 on ARM. 505 if (UseARMMethodPtrABI) { 506 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 507 offset <<= 1; 508 adj = llvm::ConstantInt::get(adj->getType(), offset); 509 } 510 511 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1); 512 llvm::Constant *dstAdj; 513 if (isDerivedToBase) 514 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); 515 else 516 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); 517 518 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1); 519} 520 521llvm::Constant * 522ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { 523 // Itanium C++ ABI 2.3: 524 // A NULL pointer is represented as -1. 525 if (MPT->isMemberDataPointer()) 526 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true); 527 528 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0); 529 llvm::Constant *Values[2] = { Zero, Zero }; 530 return llvm::ConstantStruct::getAnon(Values); 531} 532 533llvm::Constant * 534ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, 535 CharUnits offset) { 536 // Itanium C++ ABI 2.3: 537 // A pointer to data member is an offset from the base address of 538 // the class object containing it, represented as a ptrdiff_t 539 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()); 540} 541 542llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) { 543 return BuildMemberPointer(MD, CharUnits::Zero()); 544} 545 546llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, 547 CharUnits ThisAdjustment) { 548 assert(MD->isInstance() && "Member function must not be static!"); 549 MD = MD->getCanonicalDecl(); 550 551 CodeGenTypes &Types = CGM.getTypes(); 552 553 // Get the function pointer (or index if this is a virtual function). 554 llvm::Constant *MemPtr[2]; 555 if (MD->isVirtual()) { 556 uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD); 557 558 const ASTContext &Context = getContext(); 559 CharUnits PointerWidth = 560 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); 561 uint64_t VTableOffset = (Index * PointerWidth.getQuantity()); 562 563 if (UseARMMethodPtrABI) { 564 // ARM C++ ABI 3.2.1: 565 // This ABI specifies that adj contains twice the this 566 // adjustment, plus 1 if the member function is virtual. The 567 // least significant bit of adj then makes exactly the same 568 // discrimination as the least significant bit of ptr does for 569 // Itanium. 570 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); 571 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 572 2 * ThisAdjustment.getQuantity() + 1); 573 } else { 574 // Itanium C++ ABI 2.3: 575 // For a virtual function, [the pointer field] is 1 plus the 576 // virtual table offset (in bytes) of the function, 577 // represented as a ptrdiff_t. 
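// Rough numeric example of the two encodings (assuming 8-byte pointers and a
// virtual member function occupying vtable slot 2, so VTableOffset == 16):
//   Itanium: { ptr = 16 + 1 = 17, adj = ThisAdjustment }
//   ARM:     { ptr = 16,          adj = 2 * ThisAdjustment + 1 }
// A non-virtual function instead stores the function's address in 'ptr' and the
// (on ARM, doubled) adjustment in 'adj'; see the non-virtual case further below.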
578 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1); 579 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 580 ThisAdjustment.getQuantity()); 581 } 582 } else { 583 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); 584 llvm::Type *Ty; 585 // Check whether the function has a computable LLVM signature. 586 if (Types.isFuncTypeConvertible(FPT)) { 587 // The function has a computable LLVM signature; use the correct type. 588 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD)); 589 } else { 590 // Use an arbitrary non-function type to tell GetAddrOfFunction that the 591 // function type is incomplete. 592 Ty = CGM.PtrDiffTy; 593 } 594 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty); 595 596 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); 597 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 598 (UseARMMethodPtrABI ? 2 : 1) * 599 ThisAdjustment.getQuantity()); 600 } 601 602 return llvm::ConstantStruct::getAnon(MemPtr); 603} 604 605llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP, 606 QualType MPType) { 607 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>(); 608 const ValueDecl *MPD = MP.getMemberPointerDecl(); 609 if (!MPD) 610 return EmitNullMemberPointer(MPT); 611 612 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP); 613 614 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) 615 return BuildMemberPointer(MD, ThisAdjustment); 616 617 CharUnits FieldOffset = 618 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD)); 619 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset); 620} 621 622/// The comparison algorithm is pretty easy: the member pointers are 623/// the same if they're either bitwise identical *or* both null. 624/// 625/// ARM is different here only because null-ness is more complicated. 626llvm::Value * 627ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF, 628 llvm::Value *L, 629 llvm::Value *R, 630 const MemberPointerType *MPT, 631 bool Inequality) { 632 CGBuilderTy &Builder = CGF.Builder; 633 634 llvm::ICmpInst::Predicate Eq; 635 llvm::Instruction::BinaryOps And, Or; 636 if (Inequality) { 637 Eq = llvm::ICmpInst::ICMP_NE; 638 And = llvm::Instruction::Or; 639 Or = llvm::Instruction::And; 640 } else { 641 Eq = llvm::ICmpInst::ICMP_EQ; 642 And = llvm::Instruction::And; 643 Or = llvm::Instruction::Or; 644 } 645 646 // Member data pointers are easy because there's a unique null 647 // value, so it just comes down to bitwise equality. 648 if (MPT->isMemberDataPointer()) 649 return Builder.CreateICmp(Eq, L, R); 650 651 // For member function pointers, the tautologies are more complex. 652 // The Itanium tautology is: 653 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj)) 654 // The ARM tautology is: 655 // (L == R) <==> (L.ptr == R.ptr && 656 // (L.adj == R.adj || 657 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0))) 658 // The inequality tautologies have exactly the same structure, except 659 // applying De Morgan's laws. 660 661 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr"); 662 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr"); 663 664 // This condition tests whether L.ptr == R.ptr. This must always be 665 // true for equality to hold. 666 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr"); 667 668 // This condition, together with the assumption that L.ptr == R.ptr, 669 // tests whether the pointers are both null. 
ARM imposes an extra 670 // condition. 671 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType()); 672 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null"); 673 674 // This condition tests whether L.adj == R.adj. If this isn't 675 // true, the pointers are unequal unless they're both null. 676 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj"); 677 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj"); 678 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj"); 679 680 // Null member function pointers on ARM clear the low bit of Adj, 681 // so the zero condition has to check that neither low bit is set. 682 if (UseARMMethodPtrABI) { 683 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1); 684 685 // Compute (l.adj | r.adj) & 1 and test it against zero. 686 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj"); 687 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One); 688 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero, 689 "cmp.or.adj"); 690 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero); 691 } 692 693 // Tie together all our conditions. 694 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq); 695 Result = Builder.CreateBinOp(And, PtrEq, Result, 696 Inequality ? "memptr.ne" : "memptr.eq"); 697 return Result; 698} 699 700llvm::Value * 701ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 702 llvm::Value *MemPtr, 703 const MemberPointerType *MPT) { 704 CGBuilderTy &Builder = CGF.Builder; 705 706 /// For member data pointers, this is just a check against -1. 707 if (MPT->isMemberDataPointer()) { 708 assert(MemPtr->getType() == CGM.PtrDiffTy); 709 llvm::Value *NegativeOne = 710 llvm::Constant::getAllOnesValue(MemPtr->getType()); 711 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool"); 712 } 713 714 // In Itanium, a member function pointer is not null if 'ptr' is not null. 715 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr"); 716 717 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0); 718 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool"); 719 720 // On ARM, a member function pointer is also non-null if the low bit of 'adj' 721 // (the virtual bit) is set. 722 if (UseARMMethodPtrABI) { 723 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1); 724 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj"); 725 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit"); 726 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero, 727 "memptr.isvirtual"); 728 Result = Builder.CreateOr(Result, IsVirtual); 729 } 730 731 return Result; 732} 733 734/// The Itanium ABI requires non-zero initialization only for data 735/// member pointers, for which '0' is a valid offset. 736bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { 737 return MPT->getPointeeType()->isFunctionType(); 738} 739 740/// The Itanium ABI always places an offset to the complete object 741/// at entry -2 in the vtable. 742llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF, 743 llvm::Value *ptr, 744 QualType type) { 745 // Grab the vtable pointer as an intptr_t*. 746 llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo()); 747 748 // Track back to entry -2 and pull out the offset there. 
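// For reference, the Itanium vtable layout places two entries immediately before
// each address point:
//   [-2] offset-to-top (ptrdiff_t byte offset back to the complete object)
//   [-1] RTTI pointer
// So if a base subobject sits at, say, offset 16 inside the complete object, its
// vptr points into a secondary vtable whose offset-to-top entry is -16, and adding
// that loaded value to 'ptr' below recovers the complete object, which is what
// dynamic_cast<void*> requires.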
749 llvm::Value *offsetPtr = 750 CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr"); 751 llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr); 752 offset->setAlignment(CGF.PointerAlignInBytes); 753 754 // Apply the offset. 755 ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); 756 return CGF.Builder.CreateInBoundsGEP(ptr, offset); 757} 758 759llvm::Value * 760ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF, 761 llvm::Value *This, 762 const CXXRecordDecl *ClassDecl, 763 const CXXRecordDecl *BaseClassDecl) { 764 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy); 765 CharUnits VBaseOffsetOffset = 766 CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl); 767 768 llvm::Value *VBaseOffsetPtr = 769 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(), 770 "vbase.offset.ptr"); 771 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr, 772 CGM.PtrDiffTy->getPointerTo()); 773 774 llvm::Value *VBaseOffset = 775 CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset"); 776 777 return VBaseOffset; 778} 779 780/// The generic ABI passes 'this', plus a VTT if it's initializing a 781/// base subobject. 782void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor, 783 CXXCtorType Type, 784 CanQualType &ResTy, 785 SmallVectorImpl<CanQualType> &ArgTys) { 786 ASTContext &Context = getContext(); 787 788 // 'this' parameter is already there, as well as 'this' return if 789 // HasThisReturn(GlobalDecl(Ctor, Type)) is true 790 791 // Check if we need to add a VTT parameter (which has type void **). 792 if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0) 793 ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy)); 794} 795 796void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) { 797 // Just make sure we're in sync with TargetCXXABI. 798 assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); 799 800 // The constructor used for constructing this as a complete class; 801 // constructs the virtual bases, then calls the base constructor. 802 if (!D->getParent()->isAbstract()) { 803 // We don't need to emit the complete ctor if the class is abstract. 804 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete)); 805 } 806 807 // The constructor used for constructing this as a base class; 808 // ignores virtual bases. 809 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base)); 810} 811 812/// The generic ABI passes 'this', plus a VTT if it's destroying a 813/// base subobject. 814void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor, 815 CXXDtorType Type, 816 CanQualType &ResTy, 817 SmallVectorImpl<CanQualType> &ArgTys) { 818 ASTContext &Context = getContext(); 819 820 // 'this' parameter is already there, as well as 'this' return if 821 // HasThisReturn(GlobalDecl(Dtor, Type)) is true 822 823 // Check if we need to add a VTT parameter (which has type void **). 824 if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0) 825 ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy)); 826} 827 828void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) { 829 // The destructor in a virtual table is always a 'deleting' 830 // destructor, which calls the complete destructor and then uses the 831 // appropriate operator delete. 832 if (D->isVirtual()) 833 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting)); 834 835 // The destructor used for destructing this as a most-derived class; 836 // calls the base destructor and then destructs any virtual bases.
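// For reference, the variants map onto distinct mangled symbols; for a
// hypothetical 'struct S { virtual ~S(); };' the emitted destructors would be
// roughly _ZN1SD0Ev (deleting, referenced from the vtable), _ZN1SD1Ev (complete
// object) and _ZN1SD2Ev (base object).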
837 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete)); 838 839 // The destructor used for destructing this as a base class; ignores 840 // virtual bases. 841 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base)); 842} 843 844void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF, 845 QualType &ResTy, 846 FunctionArgList &Params) { 847 /// Create the 'this' variable. 848 BuildThisParam(CGF, Params); 849 850 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl()); 851 assert(MD->isInstance()); 852 853 // Check if we need a VTT parameter as well. 854 if (NeedsVTTParameter(CGF.CurGD)) { 855 ASTContext &Context = getContext(); 856 857 // FIXME: avoid the fake decl 858 QualType T = Context.getPointerType(Context.VoidPtrTy); 859 ImplicitParamDecl *VTTDecl 860 = ImplicitParamDecl::Create(Context, 0, MD->getLocation(), 861 &Context.Idents.get("vtt"), T); 862 Params.push_back(VTTDecl); 863 getVTTDecl(CGF) = VTTDecl; 864 } 865} 866 867void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) { 868 /// Initialize the 'this' slot. 869 EmitThisParam(CGF); 870 871 /// Initialize the 'vtt' slot if needed. 872 if (getVTTDecl(CGF)) { 873 getVTTValue(CGF) 874 = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)), 875 "vtt"); 876 } 877 878 /// If this is a function that the ABI specifies returns 'this', initialize 879 /// the return slot to 'this' at the start of the function. 880 /// 881 /// Unlike the setting of return types, this is done within the ABI 882 /// implementation instead of by clients of CGCXXABI because: 883 /// 1) getThisValue is currently protected 884 /// 2) in theory, an ABI could implement 'this' returns some other way; 885 /// HasThisReturn only specifies a contract, not the implementation 886 if (HasThisReturn(CGF.CurGD)) 887 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue); 888} 889 890void ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF, 891 const CXXConstructorDecl *D, 892 CXXCtorType Type, 893 bool ForVirtualBase, bool Delegating, 894 llvm::Value *This, 895 CallExpr::const_arg_iterator ArgBeg, 896 CallExpr::const_arg_iterator ArgEnd) { 897 llvm::Value *VTT = CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, 898 Delegating); 899 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); 900 llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type); 901 902 // FIXME: Provide a source location here. 903 CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), 904 This, VTT, VTTTy, ArgBeg, ArgEnd); 905} 906 907void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, 908 const CXXRecordDecl *RD) { 909 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits()); 910 if (VTable->hasInitializer()) 911 return; 912 913 VTableContext &VTContext = CGM.getVTableContext(); 914 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); 915 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD); 916 917 // Create and set the initializer. 918 llvm::Constant *Init = CGVT.CreateVTableInitializer( 919 RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(), 920 VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks()); 921 VTable->setInitializer(Init); 922 923 // Set the correct linkage. 924 VTable->setLinkage(Linkage); 925 926 // Set the right visibility. 927 CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable); 928 929 // If this is the magic class __cxxabiv1::__fundamental_type_info, 930 // we will emit the typeinfo for the fundamental types. 
This is the 931 // same behaviour as GCC. 932 const DeclContext *DC = RD->getDeclContext(); 933 if (RD->getIdentifier() && 934 RD->getIdentifier()->isStr("__fundamental_type_info") && 935 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() && 936 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") && 937 DC->getParent()->isTranslationUnit()) 938 CGM.EmitFundamentalRTTIDescriptors(); 939} 940 941llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor( 942 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, 943 const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) { 944 bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CGF.CurGD); 945 NeedsVirtualOffset = (NeedsVTTParam && NearestVBase); 946 947 llvm::Value *VTableAddressPoint; 948 if (NeedsVTTParam && (Base.getBase()->getNumVBases() || NearestVBase)) { 949 // Get the secondary vpointer index. 950 uint64_t VirtualPointerIndex = 951 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base); 952 953 /// Load the VTT. 954 llvm::Value *VTT = CGF.LoadCXXVTT(); 955 if (VirtualPointerIndex) 956 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex); 957 958 // And load the address point from the VTT. 959 VTableAddressPoint = CGF.Builder.CreateLoad(VTT); 960 } else { 961 llvm::Constant *VTable = 962 CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits()); 963 uint64_t AddressPoint = CGM.getVTableContext().getVTableLayout(VTableClass) 964 .getAddressPoint(Base); 965 VTableAddressPoint = 966 CGF.Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint); 967 } 968 969 return VTableAddressPoint; 970} 971 972llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( 973 BaseSubobject Base, const CXXRecordDecl *VTableClass) { 974 llvm::Constant *VTable = getAddrOfVTable(VTableClass, CharUnits()); 975 976 // Find the appropriate vtable within the vtable group. 977 uint64_t AddressPoint = 978 CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base); 979 llvm::Value *Indices[] = { 980 llvm::ConstantInt::get(CGM.Int64Ty, 0), 981 llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint) 982 }; 983 984 return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices); 985} 986 987llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, 988 CharUnits VPtrOffset) { 989 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); 990 991 llvm::GlobalVariable *&VTable = VTables[RD]; 992 if (VTable) 993 return VTable; 994 995 // Queue up this v-table for possible deferred emission. 
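// The name produced by mangleCXXVTable below is the Itanium "_ZTV" symbol; e.g.
// for a hypothetical class ns::Widget the vtable variable would be
// _ZTVN2ns6WidgetE (the related VTT, type_info and type_info-name objects use
// the _ZTT, _ZTI and _ZTS prefixes respectively).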
996 CGM.addDeferredVTable(RD); 997 998 SmallString<256> OutName; 999 llvm::raw_svector_ostream Out(OutName); 1000 CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out); 1001 Out.flush(); 1002 StringRef Name = OutName.str(); 1003 1004 VTableContext &VTContext = CGM.getVTableContext(); 1005 llvm::ArrayType *ArrayType = llvm::ArrayType::get( 1006 CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents()); 1007 1008 VTable = CGM.CreateOrReplaceCXXRuntimeVariable( 1009 Name, ArrayType, llvm::GlobalValue::ExternalLinkage); 1010 VTable->setUnnamedAddr(true); 1011 return VTable; 1012} 1013 1014llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, 1015 GlobalDecl GD, 1016 llvm::Value *This, 1017 llvm::Type *Ty) { 1018 GD = GD.getCanonicalDecl(); 1019 Ty = Ty->getPointerTo()->getPointerTo(); 1020 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty); 1021 1022 uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(GD); 1023 llvm::Value *VFuncPtr = 1024 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn"); 1025 return CGF.Builder.CreateLoad(VFuncPtr); 1026} 1027 1028void ItaniumCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF, 1029 const CXXDestructorDecl *Dtor, 1030 CXXDtorType DtorType, 1031 SourceLocation CallLoc, 1032 llvm::Value *This) { 1033 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete); 1034 1035 const CGFunctionInfo *FInfo 1036 = &CGM.getTypes().arrangeCXXDestructor(Dtor, DtorType); 1037 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo); 1038 llvm::Value *Callee = 1039 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty); 1040 1041 CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValueSlot(), This, 1042 /*ImplicitParam=*/0, QualType(), 0, 0); 1043} 1044 1045void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) { 1046 CodeGenVTables &VTables = CGM.getVTables(); 1047 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD); 1048 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); 1049} 1050 1051void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF, 1052 RValue RV, QualType ResultType) { 1053 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl())) 1054 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType); 1055 1056 // Destructor thunks in the ARM ABI have indeterminate results. 1057 llvm::Type *T = 1058 cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType(); 1059 RValue Undef = RValue::get(llvm::UndefValue::get(T)); 1060 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType); 1061} 1062 1063/************************** Array allocation cookies **************************/ 1064 1065CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) { 1066 // The array cookie is a size_t; pad that up to the element alignment. 1067 // The cookie is actually right-justified in that space. 1068 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes), 1069 CGM.getContext().getTypeAlignInChars(elementType)); 1070} 1071 1072llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, 1073 llvm::Value *NewPtr, 1074 llvm::Value *NumElements, 1075 const CXXNewExpr *expr, 1076 QualType ElementType) { 1077 assert(requiresArrayCookie(expr)); 1078 1079 unsigned AS = NewPtr->getType()->getPointerAddressSpace(); 1080 1081 ASTContext &Ctx = getContext(); 1082 QualType SizeTy = Ctx.getSizeType(); 1083 CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy); 1084 1085 // The size of the cookie. 
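// Worked example of the layout computed below (assuming sizeof(size_t) == 8):
// for 'new T[n]' where T is 8-byte aligned, the cookie is 8 bytes holding n and
// the array data starts at +8; if T were 16-byte aligned instead, the cookie
// grows to 16 bytes, n is stored right-justified at offset 8, and the data
// starts at +16.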
1086 CharUnits CookieSize = 1087 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType)); 1088 assert(CookieSize == getArrayCookieSizeImpl(ElementType)); 1089 1090 // Compute an offset to the cookie. 1091 llvm::Value *CookiePtr = NewPtr; 1092 CharUnits CookieOffset = CookieSize - SizeSize; 1093 if (!CookieOffset.isZero()) 1094 CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr, 1095 CookieOffset.getQuantity()); 1096 1097 // Write the number of elements into the appropriate slot. 1098 llvm::Value *NumElementsPtr 1099 = CGF.Builder.CreateBitCast(CookiePtr, 1100 CGF.ConvertType(SizeTy)->getPointerTo(AS)); 1101 CGF.Builder.CreateStore(NumElements, NumElementsPtr); 1102 1103 // Finally, compute a pointer to the actual data buffer by skipping 1104 // over the cookie completely. 1105 return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr, 1106 CookieSize.getQuantity()); 1107} 1108 1109llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, 1110 llvm::Value *allocPtr, 1111 CharUnits cookieSize) { 1112 // The element size is right-justified in the cookie. 1113 llvm::Value *numElementsPtr = allocPtr; 1114 CharUnits numElementsOffset = 1115 cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes); 1116 if (!numElementsOffset.isZero()) 1117 numElementsPtr = 1118 CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr, 1119 numElementsOffset.getQuantity()); 1120 1121 unsigned AS = allocPtr->getType()->getPointerAddressSpace(); 1122 numElementsPtr = 1123 CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS)); 1124 return CGF.Builder.CreateLoad(numElementsPtr); 1125} 1126 1127CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) { 1128 // ARM says that the cookie is always: 1129 // struct array_cookie { 1130 // std::size_t element_size; // element_size != 0 1131 // std::size_t element_count; 1132 // }; 1133 // But the base ABI doesn't give anything an alignment greater than 1134 // 8, so we can dismiss this as typical ABI-author blindness to 1135 // actual language complexity and round up to the element alignment. 1136 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes), 1137 CGM.getContext().getTypeAlignInChars(elementType)); 1138} 1139 1140llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, 1141 llvm::Value *newPtr, 1142 llvm::Value *numElements, 1143 const CXXNewExpr *expr, 1144 QualType elementType) { 1145 assert(requiresArrayCookie(expr)); 1146 1147 // NewPtr is a char*, but we generalize to arbitrary addrspaces. 1148 unsigned AS = newPtr->getType()->getPointerAddressSpace(); 1149 1150 // The cookie is always at the start of the buffer. 1151 llvm::Value *cookie = newPtr; 1152 1153 // The first element is the element size. 1154 cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS)); 1155 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy, 1156 getContext().getTypeSizeInChars(elementType).getQuantity()); 1157 CGF.Builder.CreateStore(elementSize, cookie); 1158 1159 // The second element is the element count. 1160 cookie = CGF.Builder.CreateConstInBoundsGEP1_32(cookie, 1); 1161 CGF.Builder.CreateStore(numElements, cookie); 1162 1163 // Finally, compute a pointer to the actual data buffer by skipping 1164 // over the cookie completely. 
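// Rough example of the ARM cookie written above (assuming a 32-bit target with
// sizeof(size_t) == 4): for 'new double[10]' the first word holds the element
// size 8, the second holds the count 10, and the skip computed below is
// max(2 * 4, alignof(double)) = 8 bytes, so the array data begins at +8.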
1165 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType); 1166 return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr, 1167 cookieSize.getQuantity()); 1168} 1169 1170llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, 1171 llvm::Value *allocPtr, 1172 CharUnits cookieSize) { 1173 // The number of elements is at offset sizeof(size_t) relative to 1174 // the allocated pointer. 1175 llvm::Value *numElementsPtr 1176 = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes); 1177 1178 unsigned AS = allocPtr->getType()->getPointerAddressSpace(); 1179 numElementsPtr = 1180 CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS)); 1181 return CGF.Builder.CreateLoad(numElementsPtr); 1182} 1183 1184/*********************** Static local initialization **************************/ 1185 1186static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM, 1187 llvm::PointerType *GuardPtrTy) { 1188 // int __cxa_guard_acquire(__guard *guard_object); 1189 llvm::FunctionType *FTy = 1190 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy), 1191 GuardPtrTy, /*isVarArg=*/false); 1192 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire", 1193 llvm::AttributeSet::get(CGM.getLLVMContext(), 1194 llvm::AttributeSet::FunctionIndex, 1195 llvm::Attribute::NoUnwind)); 1196} 1197 1198static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM, 1199 llvm::PointerType *GuardPtrTy) { 1200 // void __cxa_guard_release(__guard *guard_object); 1201 llvm::FunctionType *FTy = 1202 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false); 1203 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release", 1204 llvm::AttributeSet::get(CGM.getLLVMContext(), 1205 llvm::AttributeSet::FunctionIndex, 1206 llvm::Attribute::NoUnwind)); 1207} 1208 1209static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM, 1210 llvm::PointerType *GuardPtrTy) { 1211 // void __cxa_guard_abort(__guard *guard_object); 1212 llvm::FunctionType *FTy = 1213 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false); 1214 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort", 1215 llvm::AttributeSet::get(CGM.getLLVMContext(), 1216 llvm::AttributeSet::FunctionIndex, 1217 llvm::Attribute::NoUnwind)); 1218} 1219 1220namespace { 1221 struct CallGuardAbort : EHScopeStack::Cleanup { 1222 llvm::GlobalVariable *Guard; 1223 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {} 1224 1225 void Emit(CodeGenFunction &CGF, Flags flags) { 1226 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()), 1227 Guard); 1228 } 1229 }; 1230} 1231 1232/// The ARM code here follows the Itanium code closely enough that we 1233/// just special-case it at particular places. 1234void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF, 1235 const VarDecl &D, 1236 llvm::GlobalVariable *var, 1237 bool shouldPerformInit) { 1238 CGBuilderTy &Builder = CGF.Builder; 1239 1240 // We only need to use thread-safe statics for local non-TLS variables; 1241 // global initialization is always single-threaded. 1242 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics && 1243 D.isLocalVarDecl() && !D.getTLSKind(); 1244 1245 // If we have a global variable with internal linkage and thread-safe statics 1246 // are disabled, we can just let the guard variable be of type i8. 
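// Typical source that reaches this path (illustrative only):
//   Widget &instance() { static Widget w; return w; }
// For such a local 'w' the guard object is mangled with the _ZGV prefix (here
// _ZGVZ8instancevE1w), and with thread-safe statics enabled the initialization
// is bracketed by the __cxa_guard_acquire / __cxa_guard_release calls emitted
// further down.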
1247 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage(); 1248 1249 llvm::IntegerType *guardTy; 1250 if (useInt8GuardVariable) { 1251 guardTy = CGF.Int8Ty; 1252 } else { 1253 // Guard variables are 64 bits in the generic ABI and size width on ARM 1254 // (i.e. 32-bit on AArch32, 64-bit on AArch64). 1255 guardTy = (UseARMGuardVarABI ? CGF.SizeTy : CGF.Int64Ty); 1256 } 1257 llvm::PointerType *guardPtrTy = guardTy->getPointerTo(); 1258 1259 // Create the guard variable if we don't already have it (as we 1260 // might if we're double-emitting this function body). 1261 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D); 1262 if (!guard) { 1263 // Mangle the name for the guard. 1264 SmallString<256> guardName; 1265 { 1266 llvm::raw_svector_ostream out(guardName); 1267 getMangleContext().mangleStaticGuardVariable(&D, out); 1268 out.flush(); 1269 } 1270 1271 // Create the guard variable with a zero-initializer. 1272 // Just absorb linkage and visibility from the guarded variable. 1273 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy, 1274 false, var->getLinkage(), 1275 llvm::ConstantInt::get(guardTy, 0), 1276 guardName.str()); 1277 guard->setVisibility(var->getVisibility()); 1278 // If the variable is thread-local, so is its guard variable. 1279 guard->setThreadLocalMode(var->getThreadLocalMode()); 1280 1281 CGM.setStaticLocalDeclGuardAddress(&D, guard); 1282 } 1283 1284 // Test whether the variable has completed initialization. 1285 llvm::Value *isInitialized; 1286 1287 // ARM C++ ABI 3.2.3.1: 1288 // To support the potential use of initialization guard variables 1289 // as semaphores that are the target of ARM SWP and LDREX/STREX 1290 // synchronizing instructions we define a static initialization 1291 // guard variable to be a 4-byte aligned, 4- byte word with the 1292 // following inline access protocol. 1293 // #define INITIALIZED 1 1294 // if ((obj_guard & INITIALIZED) != INITIALIZED) { 1295 // if (__cxa_guard_acquire(&obj_guard)) 1296 // ... 1297 // } 1298 if (UseARMGuardVarABI && !useInt8GuardVariable) { 1299 llvm::Value *V = Builder.CreateLoad(guard); 1300 llvm::Value *Test1 = llvm::ConstantInt::get(guardTy, 1); 1301 V = Builder.CreateAnd(V, Test1); 1302 isInitialized = Builder.CreateIsNull(V, "guard.uninitialized"); 1303 1304 // Itanium C++ ABI 3.3.2: 1305 // The following is pseudo-code showing how these functions can be used: 1306 // if (obj_guard.first_byte == 0) { 1307 // if ( __cxa_guard_acquire (&obj_guard) ) { 1308 // try { 1309 // ... initialize the object ...; 1310 // } catch (...) { 1311 // __cxa_guard_abort (&obj_guard); 1312 // throw; 1313 // } 1314 // ... queue object destructor with __cxa_atexit() ...; 1315 // __cxa_guard_release (&obj_guard); 1316 // } 1317 // } 1318 } else { 1319 // Load the first byte of the guard variable. 1320 llvm::LoadInst *LI = 1321 Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy)); 1322 LI->setAlignment(1); 1323 1324 // Itanium ABI: 1325 // An implementation supporting thread-safety on multiprocessor 1326 // systems must also guarantee that references to the initialized 1327 // object do not occur before the load of the initialization flag. 1328 // 1329 // In LLVM, we do this by marking the load Acquire. 
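// (The matching release ordering is expected from __cxa_guard_release in the
// runtime, so a thread that sees the guard byte set here also sees the fully
// initialized object.)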
1330 if (threadsafe) 1331 LI->setAtomic(llvm::Acquire); 1332 1333 isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized"); 1334 } 1335 1336 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check"); 1337 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end"); 1338 1339 // Check if the first byte of the guard variable is zero. 1340 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock); 1341 1342 CGF.EmitBlock(InitCheckBlock); 1343 1344 // Variables used when coping with thread-safe statics and exceptions. 1345 if (threadsafe) { 1346 // Call __cxa_guard_acquire. 1347 llvm::Value *V 1348 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard); 1349 1350 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init"); 1351 1352 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"), 1353 InitBlock, EndBlock); 1354 1355 // Call __cxa_guard_abort along the exceptional edge. 1356 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard); 1357 1358 CGF.EmitBlock(InitBlock); 1359 } 1360 1361 // Emit the initializer and add a global destructor if appropriate. 1362 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit); 1363 1364 if (threadsafe) { 1365 // Pop the guard-abort cleanup if we pushed one. 1366 CGF.PopCleanupBlock(); 1367 1368 // Call __cxa_guard_release. This cannot throw. 1369 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard); 1370 } else { 1371 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard); 1372 } 1373 1374 CGF.EmitBlock(EndBlock); 1375} 1376 1377/// Register a global destructor using __cxa_atexit. 1378static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF, 1379 llvm::Constant *dtor, 1380 llvm::Constant *addr, 1381 bool TLS) { 1382 const char *Name = "__cxa_atexit"; 1383 if (TLS) { 1384 const llvm::Triple &T = CGF.getTarget().getTriple(); 1385 Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit"; 1386 } 1387 1388 // We're assuming that the destructor function is something we can 1389 // reasonably call with the default CC. Go ahead and cast it to the 1390 // right prototype. 1391 llvm::Type *dtorTy = 1392 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo(); 1393 1394 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); 1395 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy }; 1396 llvm::FunctionType *atexitTy = 1397 llvm::FunctionType::get(CGF.IntTy, paramTys, false); 1398 1399 // Fetch the actual function. 1400 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 1401 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit)) 1402 fn->setDoesNotThrow(); 1403 1404 // Create a variable that binds the atexit to this shared object. 1405 llvm::Constant *handle = 1406 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle"); 1407 1408 llvm::Value *args[] = { 1409 llvm::ConstantExpr::getBitCast(dtor, dtorTy), 1410 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy), 1411 handle 1412 }; 1413 CGF.EmitNounwindRuntimeCall(atexit, args); 1414} 1415 1416/// Register a global destructor as best as we know how. 1417void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, 1418 const VarDecl &D, 1419 llvm::Constant *dtor, 1420 llvm::Constant *addr) { 1421 // Use __cxa_atexit if available. 
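// The registration emitted through this path is morally equivalent to the
// C-level call
//   __cxa_atexit((void (*)(void *))&T::~T, &object, &__dso_handle);
// (see emitGlobalDtorWithCXAAtExit above); the runtime then runs registered
// destructors in reverse order at exit or when the containing DSO is unloaded.
// 'T' and 'object' stand in for the guarded global.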
1422 if (CGM.getCodeGenOpts().CXAAtExit) 1423 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind()); 1424 1425 if (D.getTLSKind()) 1426 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction"); 1427 1428 // In Apple kexts, we want to add a global destructor entry. 1429 // FIXME: shouldn't this be guarded by some variable? 1430 if (CGM.getLangOpts().AppleKext) { 1431 // Generate a global destructor entry. 1432 return CGM.AddCXXDtorEntry(dtor, addr); 1433 } 1434 1435 CGF.registerGlobalDtorWithAtExit(D, dtor, addr); 1436} 1437 1438/// Get the appropriate linkage for the wrapper function. This is essentially 1439/// the weak form of the variable's linkage; every translation unit which needs 1440/// the wrapper emits a copy, and we want the linker to merge them. 1441static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage( 1442 llvm::GlobalValue::LinkageTypes VarLinkage) { 1443 if (llvm::GlobalValue::isLinkerPrivateLinkage(VarLinkage)) 1444 return llvm::GlobalValue::LinkerPrivateWeakLinkage; 1445 // For internal linkage variables, we don't need an external or weak wrapper. 1446 if (llvm::GlobalValue::isLocalLinkage(VarLinkage)) 1447 return VarLinkage; 1448 return llvm::GlobalValue::WeakODRLinkage; 1449} 1450 1451llvm::Function * 1452ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD, 1453 llvm::GlobalVariable *Var) { 1454 // Mangle the name for the thread_local wrapper function. 1455 SmallString<256> WrapperName; 1456 { 1457 llvm::raw_svector_ostream Out(WrapperName); 1458 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out); 1459 Out.flush(); 1460 } 1461 1462 if (llvm::Value *V = Var->getParent()->getNamedValue(WrapperName)) 1463 return cast<llvm::Function>(V); 1464 1465 llvm::Type *RetTy = Var->getType(); 1466 if (VD->getType()->isReferenceType()) 1467 RetTy = RetTy->getPointerElementType(); 1468 1469 llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false); 1470 llvm::Function *Wrapper = llvm::Function::Create( 1471 FnTy, getThreadLocalWrapperLinkage(Var->getLinkage()), WrapperName.str(), 1472 &CGM.getModule()); 1473 // Always resolve references to the wrapper at link time. 1474 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility); 1475 return Wrapper; 1476} 1477 1478void ItaniumCXXABI::EmitThreadLocalInitFuncs( 1479 llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls, 1480 llvm::Function *InitFunc) { 1481 for (unsigned I = 0, N = Decls.size(); I != N; ++I) { 1482 const VarDecl *VD = Decls[I].first; 1483 llvm::GlobalVariable *Var = Decls[I].second; 1484 1485 // Mangle the name for the thread_local initialization function. 1486 SmallString<256> InitFnName; 1487 { 1488 llvm::raw_svector_ostream Out(InitFnName); 1489 getMangleContext().mangleItaniumThreadLocalInit(VD, Out); 1490 Out.flush(); 1491 } 1492 1493 // If we have a definition for the variable, emit the initialization 1494 // function as an alias to the global Init function (if any). Otherwise, 1495 // produce a declaration of the initialization function. 1496 llvm::GlobalValue *Init = 0; 1497 bool InitIsInitFunc = false; 1498 if (VD->hasDefinition()) { 1499 InitIsInitFunc = true; 1500 if (InitFunc) 1501 Init = 1502 new llvm::GlobalAlias(InitFunc->getType(), Var->getLinkage(), 1503 InitFnName.str(), InitFunc, &CGM.getModule()); 1504 } else { 1505 // Emit a weak global function referring to the initialization function.
1506 // This function will not exist if the TU defining the thread_local 1507 // variable in question does not need any dynamic initialization for 1508 // its thread_local variables. 1509 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false); 1510 Init = llvm::Function::Create( 1511 FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(), 1512 &CGM.getModule()); 1513 } 1514 1515 if (Init) 1516 Init->setVisibility(Var->getVisibility()); 1517 1518 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var); 1519 llvm::LLVMContext &Context = CGM.getModule().getContext(); 1520 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper); 1521 CGBuilderTy Builder(Entry); 1522 if (InitIsInitFunc) { 1523 if (Init) 1524 Builder.CreateCall(Init); 1525 } else { 1526 // Don't know whether we have an init function. Call it if it exists. 1527 llvm::Value *Have = Builder.CreateIsNotNull(Init); 1528 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper); 1529 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper); 1530 Builder.CreateCondBr(Have, InitBB, ExitBB); 1531 1532 Builder.SetInsertPoint(InitBB); 1533 Builder.CreateCall(Init); 1534 Builder.CreateBr(ExitBB); 1535 1536 Builder.SetInsertPoint(ExitBB); 1537 } 1538 1539 // For a reference, the result of the wrapper function is a pointer to 1540 // the referenced object. 1541 llvm::Value *Val = Var; 1542 if (VD->getType()->isReferenceType()) { 1543 llvm::LoadInst *LI = Builder.CreateLoad(Val); 1544 LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity()); 1545 Val = LI; 1546 } 1547 1548 Builder.CreateRet(Val); 1549 } 1550} 1551 1552LValue ItaniumCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF, 1553 const DeclRefExpr *DRE) { 1554 const VarDecl *VD = cast<VarDecl>(DRE->getDecl()); 1555 QualType T = VD->getType(); 1556 llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T); 1557 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty); 1558 llvm::Function *Wrapper = 1559 getOrCreateThreadLocalWrapper(VD, cast<llvm::GlobalVariable>(Val)); 1560 1561 Val = CGF.Builder.CreateCall(Wrapper); 1562 1563 LValue LV; 1564 if (VD->getType()->isReferenceType()) 1565 LV = CGF.MakeNaturalAlignAddrLValue(Val, T); 1566 else 1567 LV = CGF.MakeAddrLValue(Val, DRE->getType(), 1568 CGF.getContext().getDeclAlign(VD)); 1569 // FIXME: need setObjCGCLValueClass? 1570 return LV; 1571} 1572 1573/// Return whether the given global decl needs a VTT parameter, which it does 1574/// if it's a base constructor or destructor with virtual bases. 1575bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { 1576 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1577 1578 // We don't have any virtual bases, just return early. 1579 if (!MD->getParent()->getNumVBases()) 1580 return false; 1581 1582 // Check if we have a base constructor. 1583 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base) 1584 return true; 1585 1586 // Check if we have a base destructor. 1587 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) 1588 return true; 1589 1590 return false; 1591} 1592
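// A short example of the VTT rule implemented by NeedsVTTParameter above
// (classes are hypothetical):
//   struct A { int a; };
//   struct B : virtual A { B(); ~B(); };
// The base-object constructor and destructor of B (the C2/D2 symbols _ZN1BC2Ev
// and _ZN1BD2Ev) receive an extra 'void **vtt' parameter so that, when B is used
// as a base of a further-derived class, they can install the construction
// vtables that class supplies; the complete-object variants (C1/D1) take no VTT
// because they construct the virtual A themselves.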