ItaniumCXXABI.cpp revision b0f533e716ae5a21ca5682ea235a68082fd5ed28
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
protected:
  bool IsARM;

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
    CGCXXABI(CGM), IsARM(IsARM) { }

  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are always indirect.
    return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
  }

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are always indirect.
    if (!RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isZeroInitializable(const MemberPointerType *MPT);

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);

  llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                               llvm::Value *&This,
                                               llvm::Value *MemFnPtr,
                                               const MemberPointerType *MPT);

  llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
                                            llvm::Value *Base,
                                            llvm::Value *MemPtr,
                                            const MemberPointerType *MPT);

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src);
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src);

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);

  llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset);
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality);

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT);

  llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
                                      llvm::Value *ptr,
                                      QualType type);

  llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl);

  void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                 CXXCtorType T,
                                 CanQualType &ResTy,
                                 SmallVectorImpl<CanQualType> &ArgTys);

  void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                CXXDtorType T,
                                CanQualType &ResTy,
                                SmallVectorImpl<CanQualType> &ArgTys);

  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                   QualType &ResTy,
                                   FunctionArgList &Params);

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);

  llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
                                   const CXXConstructorDecl *D,
                                   CXXCtorType Type, bool ForVirtualBase,
                                   bool Delegating,
                                   llvm::Value *This,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd);

  RValue EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                   const CXXDestructorDecl *Dtor,
                                   CXXDtorType DtorType,
                                   SourceLocation CallLoc,
                                   ReturnValueSlot ReturnValue,
                                   llvm::Value *This);

  StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType);
  llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                     llvm::Value *NewPtr,
                                     llvm::Value *NumElements,
                                     const CXXNewExpr *expr,
                                     QualType ElementType);
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   llvm::Value *allocPtr,
                                   CharUnits cookieSize);

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr, bool PerformInit);
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::Constant *dtor, llvm::Constant *addr);

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::GlobalVariable *Var);
  void
  EmitThreadLocalInitFuncs(
      llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
      llvm::Function *InitFunc);
  LValue EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
                                    const DeclRefExpr *DRE);
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}

  void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                 CXXCtorType T,
                                 CanQualType &ResTy,
                                 SmallVectorImpl<CanQualType> &ArgTys);

  void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                CXXDtorType T,
                                CanQualType &ResTy,
                                SmallVectorImpl<CanQualType> &ArgTys);

  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                   QualType &ResTy,
                                   FunctionArgList &Params);

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);

  CharUnits getArrayCookieSizeImpl(QualType elementType);
  llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                     llvm::Value *NewPtr,
                                     llvm::Value *NumElements,
                                     const CXXNewExpr *expr,
                                     QualType ElementType);
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
                                   CharUnits cookieSize);

  /// \brief Returns true if the given instance method is one of the
  /// kinds that the ARM ABI says returns 'this'.
  bool HasThisReturn(GlobalDecl GD) const {
    const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(GD.getDecl());
    if (!MD) return false;
    return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
            (isa<CXXConstructorDecl>(MD)));
  }
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getTarget().getCXXABI().getKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
    return new ARMCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*IsARM = */ true);

  case TargetCXXABI::GenericItanium:
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, NULL);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
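///
/// For illustration (a hypothetical class Foo, no this-adjustment): a
/// pointer to a non-virtual member Foo::f is emitted as
/// { (ptrdiff_t)&Foo::f, 0 } under both ABIs, while a pointer to a virtual
/// member at vtable offset 16 is { 17, 0 } under Itanium and { 16, 1 }
/// under ARM.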
253/// 254/// If the member is non-virtual, memptr.ptr is the address of 255/// the function to call. 256llvm::Value * 257ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 258 llvm::Value *&This, 259 llvm::Value *MemFnPtr, 260 const MemberPointerType *MPT) { 261 CGBuilderTy &Builder = CGF.Builder; 262 263 const FunctionProtoType *FPT = 264 MPT->getPointeeType()->getAs<FunctionProtoType>(); 265 const CXXRecordDecl *RD = 266 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); 267 268 llvm::FunctionType *FTy = 269 CGM.getTypes().GetFunctionType( 270 CGM.getTypes().arrangeCXXMethodType(RD, FPT)); 271 272 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 273 274 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 275 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 276 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 277 278 // Extract memptr.adj, which is in the second field. 279 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 280 281 // Compute the true adjustment. 282 llvm::Value *Adj = RawAdj; 283 if (IsARM) 284 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 285 286 // Apply the adjustment and cast back to the original struct type 287 // for consistency. 288 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); 289 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); 290 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); 291 292 // Load the function pointer. 293 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 294 295 // If the LSB in the function pointer is 1, the function pointer points to 296 // a virtual function. 297 llvm::Value *IsVirtual; 298 if (IsARM) 299 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 300 else 301 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 302 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 303 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 304 305 // In the virtual path, the adjustment left 'This' pointing to the 306 // vtable of the correct base subobject. The "function pointer" is an 307 // offset within the vtable (+1 for the virtual flag on non-ARM). 308 CGF.EmitBlock(FnVirtual); 309 310 // Cast the adjusted this to a pointer to vtable pointer and load. 311 llvm::Type *VTableTy = Builder.getInt8PtrTy(); 312 llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo()); 313 VTable = Builder.CreateLoad(VTable, "memptr.vtable"); 314 315 // Apply the offset. 316 llvm::Value *VTableOffset = FnAsInt; 317 if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 318 VTable = Builder.CreateGEP(VTable, VTableOffset); 319 320 // Load the virtual function to call. 321 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo()); 322 llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn"); 323 CGF.EmitBranch(FnEnd); 324 325 // In the non-virtual path, the function pointer is actually a 326 // function pointer. 327 CGF.EmitBlock(FnNonVirtual); 328 llvm::Value *NonVirtualFn = 329 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); 330 331 // We're done. 
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
  Callee->addIncoming(VirtualFn, FnVirtual);
  Callee->addIncoming(NonVirtualFn, FnNonVirtual);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
                                                         llvm::Value *Base,
                                                         llvm::Value *MemPtr,
                                                         const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  unsigned AS = Base->getType()->getPointerAddressSpace();

  // Cast to char*.
  Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType
    = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->            <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point      ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
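    // A null member data pointer is represented as -1 (see
    // EmitNullMemberPointer), so compare against all-ones and pass the
    // source through unchanged when it is null.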
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (IsARM) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (IsARM) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
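  // (Offset 0 is a valid offset for the first data member, so the null
  //  value must be distinguishable from every real offset; for member
  //  function pointers the null value is the all-zero pair built below.)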
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");
  MD = MD->getCanonicalDecl();

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (IsARM) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, (IsARM ?
                                                       2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (IsARM) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (IsARM) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->getPointeeType()->isFunctionType();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
                                                   llvm::Value *ptr,
                                                   QualType type) {
  // Grab the vtable pointer as an intptr_t*.
  llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo());

  // Track back to entry -2 and pull out the offset there.
  llvm::Value *offsetPtr =
    CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr");
  llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr);
  offset->setAlignment(CGF.PointerAlignInBytes);

  // Apply the offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  return CGF.Builder.CreateInBoundsGEP(ptr, offset);
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy);
  CharUnits VBaseOffsetOffset =
    CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");
  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                             CGM.PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset =
    CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");

  return VBaseOffset;
}

/// The generic ABI passes 'this', plus a VTT if it's initializing a
/// base subobject.
void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                              CXXCtorType Type,
                                              CanQualType &ResTy,
                                              SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // 'this' is already there.

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
}

/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                          CXXCtorType Type,
                                          CanQualType &ResTy,
                                          SmallVectorImpl<CanQualType> &ArgTys) {
  ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
  ResTy = ArgTys[0];
}

/// The generic ABI passes 'this', plus a VTT if it's destroying a
/// base subobject.
void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                             CXXDtorType Type,
                                             CanQualType &ResTy,
                                             SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // 'this' is already there.

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
}

/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
/// for non-deleting destructors.
void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                         CXXDtorType Type,
                                         CanQualType &ResTy,
                                         SmallVectorImpl<CanQualType> &ArgTys) {
  ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);

  if (Type != Dtor_Deleting)
    ResTy = ArgTys[0];
}

void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                                QualType &ResTy,
                                                FunctionArgList &Params) {
  /// Create the 'this' variable.
  BuildThisParam(CGF, Params);

  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(MD->isInstance());

  // Check if we need a VTT parameter as well.
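  // (The VTT, the "virtual table table", is only passed to base-object
  //  constructors and destructors of classes with virtual bases.)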
  if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    ImplicitParamDecl *VTTDecl
      = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
                                  &Context.Idents.get("vtt"), T);
    Params.push_back(VTTDecl);
    getVTTDecl(CGF) = VTTDecl;
  }
}

void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                            QualType &ResTy,
                                            FunctionArgList &Params) {
  ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);

  // Return 'this' from certain constructors and destructors.
  if (HasThisReturn(CGF.CurGD))
    ResTy = Params[0]->getType();
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  /// Initialize the 'this' slot.
  EmitThisParam(CGF);

  /// Initialize the 'vtt' slot if needed.
  if (getVTTDecl(CGF)) {
    getVTTValue(CGF)
      = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
                               "vtt");
  }
}

void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);

  /// Initialize the return slot to 'this' at the start of the
  /// function.
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

llvm::Value *ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  llvm::Value *VTT = CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase,
                                         Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);

  // FIXME: Provide a source location here.
  CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
                        VTT, VTTTy, ArgBeg, ArgEnd);
  return Callee;
}

RValue ItaniumCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                                const CXXDestructorDecl *Dtor,
                                                CXXDtorType DtorType,
                                                SourceLocation CallLoc,
                                                ReturnValueSlot ReturnValue,
                                                llvm::Value *This) {
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo
    = &CGM.getTypes().arrangeCXXDestructor(Dtor, DtorType);
  llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  llvm::Value *Callee = CGF.BuildVirtualCall(Dtor, DtorType, This, Ty);

  return CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValue, This,
                               /*ImplicitParam=*/0, QualType(), 0, 0);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T =
    cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                  llvm::Value *NewPtr,
                                                  llvm::Value *NumElements,
                                                  const CXXNewExpr *expr,
                                                  QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr->getType()->getPointerAddressSpace();

  ASTContext &Ctx = getContext();
  QualType SizeTy = Ctx.getSizeType();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  llvm::Value *CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
                                                       CookieOffset.getQuantity());

  // Write the number of elements into the appropriate slot.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateBitCast(CookiePtr,
                                CGF.ConvertType(SizeTy)->getPointerTo(AS));
  CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                llvm::Value *allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  llvm::Value *numElementsPtr = allocPtr;
  CharUnits numElementsOffset =
    cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
                                             numElementsOffset.getQuantity());

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                              llvm::Value *newPtr,
                                              llvm::Value *numElements,
                                              const CXXNewExpr *expr,
                                              QualType elementType) {
  assert(requiresArrayCookie(expr));

  // NewPtr is a char*, but we generalize to arbitrary addrspaces.
  unsigned AS = newPtr->getType()->getPointerAddressSpace();

  // The cookie is always at the start of the buffer.
  llvm::Value *cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP1_32(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
                                                cookieSize.getQuantity());
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            llvm::Value *allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  llvm::Value *numElementsPtr
    = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // We only need to use thread-safe statics for local non-TLS variables;
  // global initialization is always single-threaded.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    D.isLocalVarDecl() && !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    guardTy = (IsARM ? CGF.SizeTy : CGF.Int64Ty);
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleItaniumGuardVariable(&D, out);
      out.flush();
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  // Test whether the variable has completed initialization.
  llvm::Value *isInitialized;

  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  if (IsARM && !useInt8GuardVariable) {
    llvm::Value *V = Builder.CreateLoad(guard);
    llvm::Value *Test1 = llvm::ConstantInt::get(guardTy, 1);
    V = Builder.CreateAnd(V, Test1);
    isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  } else {
    // Load the first byte of the guard variable.
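    // (Only the first byte carries the initialized flag in the generic
    //  Itanium protocol; the remaining guard bytes are reserved for the
    //  runtime's use in __cxa_guard_acquire.)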
    llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
    LI->setAlignment(1);

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::Acquire);

    isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
  }

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
  } else {
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
                                        bool TLS) {
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
    CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
    handle
  };
  CGF.EmitNounwindRuntimeCall(atexit, args);
}

/// Register a global destructor as best as we know how.
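/// This prefers __cxa_atexit (or its thread-local analogue) when enabled,
/// emits a kext destructor entry for Apple kernel extensions, and otherwise
/// falls back to atexit-based registration.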
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
                                       const VarDecl &D,
                                       llvm::Constant *dtor,
                                       llvm::Constant *addr) {
  // Use __cxa_atexit if available.
  if (CGM.getCodeGenOpts().CXAAtExit)
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(dtor, addr);
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes getThreadLocalWrapperLinkage(
    llvm::GlobalValue::LinkageTypes VarLinkage) {
  if (llvm::GlobalValue::isLinkerPrivateLinkage(VarLinkage))
    return llvm::GlobalValue::LinkerPrivateWeakLinkage;
  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::GlobalVariable *Var) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
    Out.flush();
  }

  if (llvm::Value *V = Var->getParent()->getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  llvm::Type *RetTy = Var->getType();
  if (VD->getType()->isReferenceType())
    RetTy = RetTy->getPointerElementType();

  llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
  llvm::Function *Wrapper = llvm::Function::Create(
      FnTy, getThreadLocalWrapperLinkage(Var->getLinkage()), WrapperName.str(),
      &CGM.getModule());
  // Always resolve references to the wrapper at link time.
  Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    llvm::ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
    llvm::Function *InitFunc) {
  for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
    const VarDecl *VD = Decls[I].first;
    llvm::GlobalVariable *Var = Decls[I].second;

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
      Out.flush();
    }

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = 0;
    bool InitIsInitFunc = false;
    if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      if (InitFunc)
        Init =
            new llvm::GlobalAlias(InitFunc->getType(), Var->getLinkage(),
                                  InitFnName.str(), InitFunc, &CGM.getModule());
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
      Init = llvm::Function::Create(
          FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
          &CGM.getModule());
    }

    if (Init)
      Init->setVisibility(Var->getVisibility());

    llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(Entry);
    if (InitIsInitFunc) {
      if (Init)
        Builder.CreateCall(Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(Val);
      LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
      Val = LI;
    }

    Builder.CreateRet(Val);
  }
}

LValue ItaniumCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
                                                 const DeclRefExpr *DRE) {
  const VarDecl *VD = cast<VarDecl>(DRE->getDecl());
  QualType T = VD->getType();
  llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
  llvm::Function *Wrapper =
      getOrCreateThreadLocalWrapper(VD, cast<llvm::GlobalVariable>(Val));

  Val = CGF.Builder.CreateCall(Wrapper);

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(Val, T);
  else
    LV = CGF.MakeAddrLValue(Val, DRE->getType(),
                            CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}