CGExprCXX.cpp revision f85e193739c953358c865005855253af4f68a497
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOptions().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); }
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
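      // For instance (illustrative, not from the original source), for
      // 'a = b' with a trivial operator=, copying the object representation
      // with an aggregate copy has the same effect as calling the operator.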
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                            Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                        && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                               ME->getBase(),
                                                               MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
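  // (Background note, not from the original: under the Itanium C++ ABI a
  // pointer to member function is a (ptr, adj) pair; loading the callee
  // applies the 'adj' this-adjustment and, for virtual members, a vtable
  // lookup, which is why 'This' is updated in place.)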
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
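  // For example (illustrative): given 'X f(); X x = f();', the copy from
  // the temporary return value is elidable, so the temporary can be
  // emitted directly into the destination slot.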
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
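  // (Hedged note: an array cookie is extra storage, in the Itanium ABI
  // typically a size_t element count, placed in front of the array so that
  // delete[] can later recover how many elements to destroy.)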
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  const llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
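    // (size_t(-1) is SIZE_MAX; no allocation of that size can succeed, so
    // operator new will throw bad_alloc, or return null for a non-throwing
    // allocator, rather than returning a too-small block.)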
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to four conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 4) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                                     llvm::ConstantInt::get(CGF.SizeTy, 0));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow,
                               intrinsicTypes, 1);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow,
                               intrinsicTypes, 1);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, NewPtr, false, AllocType.isVolatileQualified(),
                       Alignment, AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements), fall to the loop body;
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
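  // A rough sketch of the IR being built here (illustrative, assuming a
  // 64-bit size_t):
  //   for.cond:
  //     %i = load i64* %loop.index
  //     %isless = icmp ult i64 %i, %n
  //     br i1 %isless, label %for.body, label %for.end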
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, store the initializer into the current array
  // element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
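  // (No constructor to call here: with zero initializer arguments there is
  // nothing to emit, and with one, e.g. 'new int(42)' as an illustrative
  // case, the value is stored directly below.)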
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

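  // (Hedged note: a new-expression is "conditional" when it is not
  // evaluated unconditionally, e.g. 'cond ? new T : 0' as an illustrative
  // case; the variant below saves its operands as dominating values since
  // the cleanup may run where the original SSA values are unavailable.)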
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
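  // (DominatingValue<RValue>::save spills the value so that the cleanup,
  // which may be emitted on paths the current block does not dominate, can
  // reload it later via restore(); see CallDeleteDuringConditionalNew.)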
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
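    // (The reserved global placement allocation function is
    // 'void *operator new(std::size_t, void *p) throw()', which simply
    // returns 'p', so the emitted pointer argument is reused directly.)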
  } else {
    RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType.isPODType(getContext()) && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
  }

  const llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  } else {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
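  // (Hedged note: once initialization has finished, a later exception must
  // no longer free the object, so the cleanup is deactivated rather than
  // left armed for the remainder of the enclosing scope.)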
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOptions().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                           ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        const llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

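// (Hedged sketch of the Itanium-style heap layout consulted below:
//    [cookie: element count][elem 0][elem 1]...[elem N-1]
// ReadArrayCookie recovers both the element count and the pointer that was
// originally returned by operator new[].)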
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  } else if (CGF.getLangOptions().ObjCAutoRefCount &&
             ElementType->isObjCLifetimeType() &&
             (ElementType.getObjCLifetime() == Qualifiers::OCL_Strong ||
              ElementType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
    bool IsStrong = ElementType.getObjCLifetime() == Qualifiers::OCL_Strong;
    const llvm::Type *SizeLTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);

    // Create a temporary for the loop index and initialize it with count of
    // array elements.
    llvm::Value *IndexPtr = CGF.CreateTempAlloca(SizeLTy, "loop.index");

    // Store the number of elements in the index pointer.
    CGF.Builder.CreateStore(NumElements, IndexPtr);

    // Start the loop with a block that tests the condition.
    llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
    llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

    CGF.EmitBlock(CondBlock);

    llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");

    // Generate: if (loop-index != 0), fall to the loop body;
    // otherwise, go to the block after the for-loop.
    llvm::Value* zeroConstant = llvm::Constant::getNullValue(SizeLTy);
    llvm::Value *Counter = CGF.Builder.CreateLoad(IndexPtr);
    llvm::Value *IsNE = CGF.Builder.CreateICmpNE(Counter, zeroConstant,
                                                 "isne");
    // If the condition is true, execute the body.
    CGF.Builder.CreateCondBr(IsNE, ForBody, AfterFor);

    CGF.EmitBlock(ForBody);

    llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
    // Inside the loop body, emit the release of the array element.
    Counter = CGF.Builder.CreateLoad(IndexPtr);
    Counter = CGF.Builder.CreateSub(Counter, One);
    llvm::Value *Address = CGF.Builder.CreateInBoundsGEP(Ptr, Counter,
                                                         "arrayidx");
    if (IsStrong)
      CGF.EmitARCRelease(CGF.Builder.CreateLoad(Address,
                                          ElementType.isVolatileQualified()),
                         /*precise*/ true);
    else
      CGF.EmitARCDestroyWeak(Address);

    CGF.EmitBlock(ContinueBlock);

    // Emit the decrement of the loop counter.
    Counter = CGF.Builder.CreateLoad(IndexPtr);
    Counter = CGF.Builder.CreateSub(Counter, One, "dec");
    CGF.Builder.CreateStore(Counter, IndexPtr);

    // Finally, branch back up to the condition for the next iteration.
    CGF.EmitBranch(CondBlock);

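    // (The index counts down from NumElements to zero, so elements are
    // released in reverse order of construction, mirroring C++ destruction
    // order.)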
    // Emit the fall-through block.
    CGF.EmitBlock(AfterFor, true);
  }

  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();

  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         const llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
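  // For example (illustrative): 'typeid(*p)' with a null 'p' must throw
  // std::bad_typeid instead of dereferencing the pointer, hence the
  // explicit null check emitted below.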
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  const llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();

  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  const llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
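  // For example (illustrative): 'dynamic_cast<Derived*>(p)' with a null
  // Base* 'p' yields a null result without calling the runtime, hence the
  // branch structure below.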
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}