ItaniumCXXABI.cpp revision ed23bdf69dd63e4fd01c02b12a13d1e6cbff9c2f
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
protected:
  bool IsARM;

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
    CGCXXABI(CGM), IsARM(IsARM) { }

  bool isReturnTypeIndirect(const CXXRecordDecl *RD) const {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are always indirect.
    return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
  }

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are always indirect.
    if (!RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isZeroInitializable(const MemberPointerType *MPT);

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);

  llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                               llvm::Value *&This,
                                               llvm::Value *MemFnPtr,
                                               const MemberPointerType *MPT);

  llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
                                            llvm::Value *Base,
                                            llvm::Value *MemPtr,
                                            const MemberPointerType *MPT);

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src);
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src);

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);

  llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset);
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality);

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT);

  llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
                                      llvm::Value *ptr,
                                      QualType type);

  void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                 CXXCtorType T,
                                 CanQualType &ResTy,
                                 SmallVectorImpl<CanQualType> &ArgTys);

  void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                CXXDtorType T,
                                CanQualType &ResTy,
                                SmallVectorImpl<CanQualType> &ArgTys);

  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                   QualType &ResTy,
                                   FunctionArgList &Params);

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);

  llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
                                   const CXXConstructorDecl *D,
                                   CXXCtorType Type, bool ForVirtualBase,
                                   bool Delegating,
                                   llvm::Value *This,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd);

  RValue EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                   const CXXDestructorDecl *Dtor,
                                   CXXDtorType DtorType,
                                   SourceLocation CallLoc,
                                   ReturnValueSlot ReturnValue,
                                   llvm::Value *This);

  StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType);
  llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                     llvm::Value *NewPtr,
                                     llvm::Value *NumElements,
                                     const CXXNewExpr *expr,
                                     QualType ElementType);
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   llvm::Value *allocPtr,
                                   CharUnits cookieSize);

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr, bool PerformInit);
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::Constant *dtor, llvm::Constant *addr);
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}

  void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                 CXXCtorType T,
                                 CanQualType &ResTy,
                                 SmallVectorImpl<CanQualType> &ArgTys);

  void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                CXXDtorType T,
                                CanQualType &ResTy,
                                SmallVectorImpl<CanQualType> &ArgTys);

  void BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                   QualType &ResTy,
                                   FunctionArgList &Params);

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF);

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);

  CharUnits getArrayCookieSizeImpl(QualType elementType);
  llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
                                     llvm::Value *NewPtr,
                                     llvm::Value *NumElements,
                                     const CXXNewExpr *expr,
                                     QualType ElementType);
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
                                   CharUnits cookieSize);

  /// \brief Returns true if the given instance method is one of the
  /// kinds that the ARM ABI says returns 'this'.
  bool HasThisReturn(GlobalDecl GD) const {
    const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(GD.getDecl());
    if (!MD) return false;
    return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
            (isa<CXXConstructorDecl>(MD)));
  }
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getTarget().getCXXABI().getKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
    return new ARMCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*IsARM = */ true);

  case TargetCXXABI::GenericItanium:
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, NULL);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
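///
/// For illustration, assuming a 64-bit target and the hypothetical class
///   struct A { void f(); virtual void g(); };
/// where A::g occupies the first virtual table slot:
///  - &A::f is { ptr = (ptrdiff_t)&A::f, adj = 0 } under both ABIs
///  - &A::g is { ptr = 0 + 1, adj = 0 } under Itanium, and
///    { ptr = 0, adj = 2*0 + 1 } under ARM
/// (see BuildMemberPointer below for the emission of these constants).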
llvm::Value *
ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                               llvm::Value *&This,
                                               llvm::Value *MemFnPtr,
                                               const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  llvm::FunctionType *FTy =
    CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT));

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (IsARM)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
  Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (IsARM)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject.  The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
  llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
  VTable = Builder.CreateLoad(VTable, "memptr.vtable");

  // Apply the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  VTable = Builder.CreateGEP(VTable, VTableOffset);

  // Load the virtual function to call.
  VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
  llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, the function pointer is actually a
  // function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
    Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
  Callee->addIncoming(VirtualFn, FnVirtual);
  Callee->addIncoming(NonVirtualFn, FnNonVirtual);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
                                                         llvm::Value *Base,
                                                         llvm::Value *MemPtr,
                                                         const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  unsigned AS = Base->getType()->getPointerAddressSpace();

  // Cast to char*.
  Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType
    = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
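  // (On ARM, memptr.adj carries twice the this-adjustment so that its low
  // bit can serve as the virtual-function flag; see the ARM C++ ABI 3.2.1
  // quote in BuildMemberPointer below. A raw adjustment therefore has to be
  // doubled before it is folded into the field.)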
  if (IsARM) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (IsARM) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");
  MD = MD->getCanonicalDecl();

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (IsARM) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, (IsARM ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  // Compute the this-adjustment.
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment +=
      getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (IsARM) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
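  // (Zero is a valid offset for a class's first non-static data member, so
  // the Itanium ABI reserves -1, an offset no real member can have, as the
  // null value; see also isZeroInitializable below.)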
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (IsARM) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->getPointeeType()->isFunctionType();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
llvm::Value *ItaniumCXXABI::adjustToCompleteObject(CodeGenFunction &CGF,
                                                   llvm::Value *ptr,
                                                   QualType type) {
  // Grab the vtable pointer as an intptr_t*.
  llvm::Value *vtable = CGF.GetVTablePtr(ptr, CGF.IntPtrTy->getPointerTo());

  // Track back to entry -2 and pull out the offset there.
  llvm::Value *offsetPtr =
    CGF.Builder.CreateConstInBoundsGEP1_64(vtable, -2, "complete-offset.ptr");
  llvm::LoadInst *offset = CGF.Builder.CreateLoad(offsetPtr);
  offset->setAlignment(CGF.PointerAlignInBytes);

  // Apply the offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  return CGF.Builder.CreateInBoundsGEP(ptr, offset);
}

/// The generic ABI passes 'this', plus a VTT if it's initializing a
/// base subobject.
void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                              CXXCtorType Type,
                                              CanQualType &ResTy,
                                              SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // 'this' is already there.

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
}

/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
                                          CXXCtorType Type,
                                          CanQualType &ResTy,
                                          SmallVectorImpl<CanQualType> &ArgTys) {
  ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
  ResTy = ArgTys[0];
}

/// The generic ABI passes 'this', plus a VTT if it's destroying a
/// base subobject.
void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                             CXXDtorType Type,
                                             CanQualType &ResTy,
                                             SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // 'this' is already there.

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
}

/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
/// for non-deleting destructors.
void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
                                         CXXDtorType Type,
                                         CanQualType &ResTy,
                                         SmallVectorImpl<CanQualType> &ArgTys) {
  ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);

  if (Type != Dtor_Deleting)
    ResTy = ArgTys[0];
}

void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                                QualType &ResTy,
                                                FunctionArgList &Params) {
  /// Create the 'this' variable.
  BuildThisParam(CGF, Params);

  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(MD->isInstance());

  // Check if we need a VTT parameter as well.
  if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    ImplicitParamDecl *VTTDecl
      = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
                                  &Context.Idents.get("vtt"), T);
    Params.push_back(VTTDecl);
    getVTTDecl(CGF) = VTTDecl;
  }
}

void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
                                            QualType &ResTy,
                                            FunctionArgList &Params) {
  ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);

  // Return 'this' from certain constructors and destructors.
  if (HasThisReturn(CGF.CurGD))
    ResTy = Params[0]->getType();
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  /// Initialize the 'this' slot.
  EmitThisParam(CGF);

  /// Initialize the 'vtt' slot if needed.
  if (getVTTDecl(CGF)) {
    getVTTValue(CGF)
      = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
                               "vtt");
  }
}

void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);

  /// Initialize the return slot to 'this' at the start of the
  /// function.
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

llvm::Value *ItaniumCXXABI::EmitConstructorCall(CodeGenFunction &CGF,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  llvm::Value *VTT = CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase,
                                         Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);

  // FIXME: Provide a source location here.
  CGF.EmitCXXMemberCall(D, SourceLocation(), Callee, ReturnValueSlot(), This,
                        VTT, VTTTy, ArgBeg, ArgEnd);
  return Callee;
}

RValue ItaniumCXXABI::EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                                const CXXDestructorDecl *Dtor,
                                                CXXDtorType DtorType,
                                                SourceLocation CallLoc,
                                                ReturnValueSlot ReturnValue,
                                                llvm::Value *This) {
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo
    = &CGM.getTypes().arrangeCXXDestructor(Dtor, DtorType);
  llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  llvm::Value *Callee = CGF.BuildVirtualCall(Dtor, DtorType, This, Ty);

  return CGF.EmitCXXMemberCall(Dtor, CallLoc, Callee, ReturnValue, This,
                               /*ImplicitParam=*/0, QualType(), 0, 0);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T =
    cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                  llvm::Value *NewPtr,
                                                  llvm::Value *NumElements,
                                                  const CXXNewExpr *expr,
                                                  QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr->getType()->getPointerAddressSpace();

  ASTContext &Ctx = getContext();
  QualType SizeTy = Ctx.getSizeType();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  llvm::Value *CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
                                                       CookieOffset.getQuantity());

  // Write the number of elements into the appropriate slot.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateBitCast(CookiePtr,
                                CGF.ConvertType(SizeTy)->getPointerTo(AS));
  CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                llvm::Value *allocPtr,
                                                CharUnits cookieSize) {
  // The number of elements is right-justified in the cookie.
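  // (For example, assuming a 64-bit target and an element type aligned to
  // 16 bytes, the cookie occupies 16 bytes and the count lives in its last
  // 8 bytes, so the count pointer below ends up at allocPtr + 8.)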
  llvm::Value *numElementsPtr = allocPtr;
  CharUnits numElementsOffset =
    cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
                                             numElementsOffset.getQuantity());

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                              llvm::Value *newPtr,
                                              llvm::Value *numElements,
                                              const CXXNewExpr *expr,
                                              QualType elementType) {
  assert(requiresArrayCookie(expr));

  // NewPtr is a char*, but we generalize to arbitrary addrspaces.
  unsigned AS = newPtr->getType()->getPointerAddressSpace();

  // The cookie is always at the start of the buffer.
  llvm::Value *cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP1_32(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
                                                cookieSize.getQuantity());
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            llvm::Value *allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  llvm::Value *numElementsPtr
    = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                           llvm::AttributeSet::FunctionIndex,
                                           llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                           llvm::AttributeSet::FunctionIndex,
                                           llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                           llvm::AttributeSet::FunctionIndex,
                                           llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // We only need to use thread-safe statics for local non-TLS variables;
  // global initialization is always single-threaded.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    D.isLocalVarDecl() && !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    guardTy = (IsARM ? CGF.SizeTy : CGF.Int64Ty);
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleItaniumGuardVariable(&D, out);
      out.flush();
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  // Test whether the variable has completed initialization.
  llvm::Value *isInitialized;

  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  if (IsARM && !useInt8GuardVariable) {
    llvm::Value *V = Builder.CreateLoad(guard);
    llvm::Value *Test1 = llvm::ConstantInt::get(guardTy, 1);
    V = Builder.CreateAnd(V, Test1);
    isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  } else {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
    LI->setAlignment(1);

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::Acquire);

    isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
  }

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
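    // (__cxa_guard_acquire returns a nonzero value when this thread has
    // acquired the guard and should perform the initialization, and zero
    // when another thread has already completed it.)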
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
  } else {
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
                                        bool TLS) {
  const char *Name = TLS ? "__cxa_thread_atexit" : "__cxa_atexit";

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
    CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
    handle
  };
  CGF.EmitNounwindRuntimeCall(atexit, args);
}

/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
                                       const VarDecl &D,
                                       llvm::Constant *dtor,
                                       llvm::Constant *addr) {
  // Use __cxa_atexit if available.
  if (CGM.getCodeGenOpts().CXAAtExit)
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(dtor, addr);
}