CGExprCXX.cpp revision cc09c022bebcabd5f222d410bb6695af0ea93257
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

/// EmitCXXMemberCall - Emit a call to the C++ instance method \p MD through
/// the function pointer \p Callee (either a direct address or a loaded
/// vtable slot).
///
/// \param This   the 'this' pointer, pushed as the first (implicit) argument.
/// \param VTT    optional VTT argument passed after 'this' (used by
///               base-subobject constructors/destructors); may be null.
/// \param ArgBeg/ArgEnd  the source-level arguments, not including 'this'.
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    // The VTT is passed as a void**.
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
56static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) { 57 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) { 58 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 59 // This is a record decl. We know the type and can devirtualize it. 60 return VD->getType()->isRecordType(); 61 } 62 63 return false; 64 } 65 66 // We can always devirtualize calls on temporary object expressions. 67 if (isa<CXXConstructExpr>(Base)) 68 return true; 69 70 // And calls on bound temporaries. 71 if (isa<CXXBindTemporaryExpr>(Base)) 72 return true; 73 74 // Check if this is a call expr that returns a record type. 75 if (const CallExpr *CE = dyn_cast<CallExpr>(Base)) 76 return CE->getCallReturnType()->isRecordType(); 77 78 // We can't devirtualize the call. 79 return false; 80} 81 82RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE, 83 ReturnValueSlot ReturnValue) { 84 if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens())) 85 return EmitCXXMemberPointerCallExpr(CE, ReturnValue); 86 87 const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens()); 88 const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl()); 89 90 if (MD->isStatic()) { 91 // The method is static, emit it as we would a regular call. 
92 llvm::Value *Callee = CGM.GetAddrOfFunction(MD); 93 return EmitCall(getContext().getPointerType(MD->getType()), Callee, 94 ReturnValue, CE->arg_begin(), CE->arg_end()); 95 } 96 97 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 98 99 const llvm::Type *Ty = 100 CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), 101 FPT->isVariadic()); 102 llvm::Value *This; 103 104 if (ME->isArrow()) 105 This = EmitScalarExpr(ME->getBase()); 106 else { 107 LValue BaseLV = EmitLValue(ME->getBase()); 108 This = BaseLV.getAddress(); 109 } 110 111 if (MD->isCopyAssignment() && MD->isTrivial()) { 112 // We don't like to generate the trivial copy assignment operator when 113 // it isn't necessary; just produce the proper effect here. 114 llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress(); 115 EmitAggregateCopy(This, RHS, CE->getType()); 116 return RValue::get(This); 117 } 118 119 // C++ [class.virtual]p12: 120 // Explicit qualification with the scope operator (5.1) suppresses the 121 // virtual call mechanism. 122 // 123 // We also don't emit a virtual call if the base expression has a record type 124 // because then we know what the type is. 
125 llvm::Value *Callee; 126 if (const CXXDestructorDecl *Destructor 127 = dyn_cast<CXXDestructorDecl>(MD)) { 128 if (Destructor->isTrivial()) 129 return RValue::get(0); 130 if (MD->isVirtual() && !ME->hasQualifier() && 131 !canDevirtualizeMemberFunctionCalls(ME->getBase())) { 132 Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty); 133 } else { 134 Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty); 135 } 136 } else if (MD->isVirtual() && !ME->hasQualifier() && 137 !canDevirtualizeMemberFunctionCalls(ME->getBase())) { 138 Callee = BuildVirtualCall(MD, This, Ty); 139 } else { 140 Callee = CGM.GetAddrOfFunction(MD, Ty); 141 } 142 143 return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0, 144 CE->arg_begin(), CE->arg_end()); 145} 146 147RValue 148CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, 149 ReturnValueSlot ReturnValue) { 150 const BinaryOperator *BO = 151 cast<BinaryOperator>(E->getCallee()->IgnoreParens()); 152 const Expr *BaseExpr = BO->getLHS(); 153 const Expr *MemFnExpr = BO->getRHS(); 154 155 const MemberPointerType *MPT = 156 MemFnExpr->getType()->getAs<MemberPointerType>(); 157 158 const FunctionProtoType *FPT = 159 MPT->getPointeeType()->getAs<FunctionProtoType>(); 160 const CXXRecordDecl *RD = 161 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); 162 163 // Get the member function pointer. 164 llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr); 165 166 // Emit the 'this' pointer. 167 llvm::Value *This; 168 169 if (BO->getOpcode() == BO_PtrMemI) 170 This = EmitScalarExpr(BaseExpr); 171 else 172 This = EmitLValue(BaseExpr).getAddress(); 173 174 // Ask the ABI to load the callee. Note that This is modified. 175 llvm::Value *Callee = 176 CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(CGF, This, MemFnPtr, MPT); 177 178 CallArgList Args; 179 180 QualType ThisType = 181 getContext().getPointerType(getContext().getTagDeclType(RD)); 182 183 // Push the this ptr. 
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}

/// EmitCXXOperatorMemberCallExpr - Emit an overloaded-operator call whose
/// callee is the instance method \p MD; arg 0 of \p E is the object, the
/// remaining args are the operator's operands.  Handles Objective-C
/// property/KVC l-values for the object specially.
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  if (MD->isCopyAssignment()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      // A trivial copy-assignment is just a memberwise copy; emit it as an
      // aggregate copy (or an ObjC property set) instead of a call.
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      LValue LV = EmitLValue(E->getArg(0));
      llvm::Value *This;
      if (LV.isPropertyRef() || LV.isKVCRef()) {
        // The destination is an ObjC property: evaluate the RHS into a
        // temporary and store it through the property setter.
        llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
        EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
        if (LV.isPropertyRef())
          EmitObjCPropertySet(LV.getPropertyRefExpr(),
                              RValue::getAggregate(AggLoc,
                                                   false /*VolatileDest*/));
        else
          EmitObjCPropertySet(LV.getKVCRefExpr(),
                              RValue::getAggregate(AggLoc,
                                                   false /*VolatileDest*/));
        return RValue::getAggregate(0, false);
      }
      else
        This = LV.getAddress();

      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This;
  if (LV.isPropertyRef() || LV.isKVCRef()) {
    // Load the object out of the ObjC property getter; the result must be
    // an aggregate, whose address becomes 'this'.
    QualType QT =
      E->getArg(0)->getType();
    RValue RV =
      LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
                         : EmitLoadOfKVCRefLValue(LV, QT);
    assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
    This = RV.getAggregateAddr();
  }
  else
    This = LV.getAddress();

  llvm::Value *Callee;
  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  // Skip arg 0 (the object expression) — it was emitted as 'this'.
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

/// EmitCXXConstructExpr - Emit the object construction described by \p E
/// into the memory at \p Dest.
void
CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
                                      const CXXConstructExpr *E) {
  assert(Dest && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());


  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Code gen optimization to eliminate copy constructor and return
  // its first argument instead, if in fact that argument is a temporary
  // object.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
      // Evaluate the temporary directly into the destination.
      EmitAggExpr(Arg, Dest, false);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    // Constructing an array: run the constructor over every element.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest, BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
                           E->arg_begin(), E->arg_end());
  }
}

/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
                                        const FunctionDecl *Fn) {
  // Must be in global scope.  Note that allocation functions can't be
  // declared in namespaces.
  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
    return false;

  // Signature must be void *operator new[](size_t, void*).
  // The size_t is common to all operator new[]s.
  if (Fn->getNumParams() != 2)
    return false;

  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
  return (ParamType == Ctx.VoidPtrTy);
}

/// CalculateCookiePadding - Return the size of the array cookie (the header
/// storing the element count) required for the given new-expression, or
/// zero when no cookie is needed.
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the new operator being used is
  // ::operator new[](size_t, void*).
  const FunctionDecl *OperatorNew = E->getOperatorNew();
  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
}

/// EmitCXXNewAllocSize - Compute the number of bytes to request from
/// operator new for \p E, including any array cookie, with unsigned
/// overflow detected and turned into an allocation size of -1.
///
/// \param NumElements [out] total element count (product of all array
///        dimensions); only set for array news.
/// \param SizeWithoutCookie [out] the byte size excluding the cookie.
/// \returns the full byte size to pass to operator new.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  if (!E->isArray()) {
    // Scalar new: the size is a compile-time constant, no cookie.
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Strip nested constant array dimensions, accumulating their product.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    // NOTE: zext/trunc mutate the APInt in place (old pre-3.0 APInt API).
    NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC;
      SWC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // If the high half of the double-width product is zero, no overflow.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

    // Otherwise, we don't need to do an overflow-checked multiplication if
    // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      // {result, overflow-bit} — select -1 on overflow so new throws.
      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

    // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // Fold the addition's overflow bit into the multiply's.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}

/// StoreAnyExprIntoOneUnit - Store the single POD-style initializer of
/// new-expression \p E into the storage at \p NewPtr, choosing the scalar,
/// complex, or aggregate emission path based on the allocated type.
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else
    CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
}

/// EmitNewArrayInitializer - Initialize the \p NumElements elements of a
/// POD-style array new by emitting an explicit loop that stores the
/// initializer into each element in turn.
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

/// EmitZeroMemSet - Emit a memset that zeroes \p Size bytes at \p NewPtr,
/// aligned for type \p T.
static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (NewPtr->getType() != BP)
    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");

  // memset(ptr, 0, size, align, /*isVolatile=*/false)
  CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                          Size,
                          llvm::ConstantInt::get(CGF.Int32Ty,
                                        CGF.getContext().getTypeAlign(T)/8),
                          llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                                 0));
}

/// EmitNewInitializer - Emit the initialization of the storage produced by
/// new-expression \p E: constructor calls, zero-initialization via memset,
/// or POD stores, for both scalar and array news.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

/// EmitCXXNewExpr - Emit a new-expression: compute the (overflow-checked)
/// allocation size, call operator new, optionally null-check the result,
/// write the array cookie, run the initializer, and return the typed
/// pointer.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  // For array news, strip down to the base element type.
  if (AllocType->isArrayType())
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();

  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
713 for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end(); 714 NewArg != NewArgEnd; ++NewArg) { 715 QualType ArgType = NewArg->getType(); 716 NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType), 717 ArgType)); 718 } 719 720 // Emit the call to new. 721 RValue RV = 722 EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy), 723 CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD); 724 725 // If an allocation function is declared with an empty exception specification 726 // it returns null to indicate failure to allocate storage. [expr.new]p13. 727 // (We don't need to check for null when there's no new initializer and 728 // we're allocating a POD type). 729 bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() && 730 !(AllocType->isPODType() && !E->hasInitializer()); 731 732 llvm::BasicBlock *NullCheckSource = 0; 733 llvm::BasicBlock *NewNotNull = 0; 734 llvm::BasicBlock *NewEnd = 0; 735 736 llvm::Value *NewPtr = RV.getScalarVal(); 737 unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace(); 738 739 if (NullCheckResult) { 740 NullCheckSource = Builder.GetInsertBlock(); 741 NewNotNull = createBasicBlock("new.notnull"); 742 NewEnd = createBasicBlock("new.end"); 743 744 llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull"); 745 Builder.CreateCondBr(IsNull, NewEnd, NewNotNull); 746 EmitBlock(NewNotNull); 747 } 748 749 assert((AllocSize == AllocSizeWithoutCookie) == 750 CalculateCookiePadding(*this, E).isZero()); 751 if (AllocSize != AllocSizeWithoutCookie) { 752 assert(E->isArray()); 753 NewPtr = CGM.getCXXABI().InitializeArrayCookie(CGF, NewPtr, NumElements, 754 AllocType); 755 } 756 757 const llvm::Type *ElementPtrTy 758 = ConvertTypeForMem(AllocType)->getPointerTo(AS); 759 NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy); 760 if (E->isArray()) { 761 EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie); 762 763 // NewPtr is a pointer to the base element type. 
If we're 764 // allocating an array of arrays, we'll need to cast back to the 765 // array pointer type. 766 const llvm::Type *ResultTy = ConvertTypeForMem(E->getType()); 767 if (NewPtr->getType() != ResultTy) 768 NewPtr = Builder.CreateBitCast(NewPtr, ResultTy); 769 } else { 770 EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie); 771 } 772 773 if (NullCheckResult) { 774 Builder.CreateBr(NewEnd); 775 llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock(); 776 EmitBlock(NewEnd); 777 778 llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType()); 779 PHI->reserveOperandSpace(2); 780 PHI->addIncoming(NewPtr, NotNullSource); 781 PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), 782 NullCheckSource); 783 784 NewPtr = PHI; 785 } 786 787 return NewPtr; 788} 789 790void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, 791 llvm::Value *Ptr, 792 QualType DeleteTy) { 793 assert(DeleteFD->getOverloadedOperator() == OO_Delete); 794 795 const FunctionProtoType *DeleteFTy = 796 DeleteFD->getType()->getAs<FunctionProtoType>(); 797 798 CallArgList DeleteArgs; 799 800 // Check if we need to pass the size to the delete operator. 801 llvm::Value *Size = 0; 802 QualType SizeTy; 803 if (DeleteFTy->getNumArgs() == 2) { 804 SizeTy = DeleteFTy->getArgType(1); 805 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); 806 Size = llvm::ConstantInt::get(ConvertType(SizeTy), 807 DeleteTypeSize.getQuantity()); 808 } 809 810 QualType ArgTy = DeleteFTy->getArgType(0); 811 llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy)); 812 DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy)); 813 814 if (Size) 815 DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy)); 816 817 // Emit the call to delete. 
818 EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy), 819 CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(), 820 DeleteArgs, DeleteFD); 821} 822 823namespace { 824 /// Calls the given 'operator delete' on a single object. 825 struct CallObjectDelete : EHScopeStack::Cleanup { 826 llvm::Value *Ptr; 827 const FunctionDecl *OperatorDelete; 828 QualType ElementType; 829 830 CallObjectDelete(llvm::Value *Ptr, 831 const FunctionDecl *OperatorDelete, 832 QualType ElementType) 833 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} 834 835 void Emit(CodeGenFunction &CGF, bool IsForEH) { 836 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType); 837 } 838 }; 839} 840 841/// Emit the code for deleting a single object. 842static void EmitObjectDelete(CodeGenFunction &CGF, 843 const FunctionDecl *OperatorDelete, 844 llvm::Value *Ptr, 845 QualType ElementType) { 846 // Find the destructor for the type, if applicable. If the 847 // destructor is virtual, we'll just emit the vcall and return. 848 const CXXDestructorDecl *Dtor = 0; 849 if (const RecordType *RT = ElementType->getAs<RecordType>()) { 850 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); 851 if (!RD->hasTrivialDestructor()) { 852 Dtor = RD->getDestructor(); 853 854 if (Dtor->isVirtual()) { 855 const llvm::Type *Ty = 856 CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor), 857 /*isVariadic=*/false); 858 859 llvm::Value *Callee 860 = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty); 861 CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0, 862 0, 0); 863 864 // The dtor took care of deleting the object. 865 return; 866 } 867 } 868 } 869 870 // Make sure that we call delete even if the dtor throws. 
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Run the cleanup now, calling operator delete.
  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        // NOTE(review): this local is literally named 'size_t', shadowing
        // the standard typedef within this scope — legal, but worth
        // renaming.
        QualType size_t = DeleteFTy->getArgType(1);
        const llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.push_back(std::make_pair(RValue::get(Size), size_t));
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const FunctionDecl *OperatorDelete,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  // Ask the ABI to decode the array cookie: the element count, the pointer
  // originally returned by the allocator, and the cookie size.
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  }

  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  // Deleting a null pointer is a no-op ([expr.delete]).
  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
1016 GEP.push_back(Zero); 1017 } 1018 1019 Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first"); 1020 } 1021 1022 assert(ConvertTypeForMem(DeleteTy) == 1023 cast<llvm::PointerType>(Ptr->getType())->getElementType()); 1024 1025 if (E->isArrayForm()) { 1026 EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy); 1027 } else { 1028 EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy); 1029 } 1030 1031 EmitBlock(DeleteEnd); 1032} 1033 1034llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { 1035 QualType Ty = E->getType(); 1036 const llvm::Type *LTy = ConvertType(Ty)->getPointerTo(); 1037 1038 if (E->isTypeOperand()) { 1039 llvm::Constant *TypeInfo = 1040 CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand()); 1041 return Builder.CreateBitCast(TypeInfo, LTy); 1042 } 1043 1044 Expr *subE = E->getExprOperand(); 1045 Ty = subE->getType(); 1046 CanQualType CanTy = CGM.getContext().getCanonicalType(Ty); 1047 Ty = CanTy.getUnqualifiedType().getNonReferenceType(); 1048 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1049 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); 1050 if (RD->isPolymorphic()) { 1051 // FIXME: if subE is an lvalue do 1052 LValue Obj = EmitLValue(subE); 1053 llvm::Value *This = Obj.getAddress(); 1054 LTy = LTy->getPointerTo()->getPointerTo(); 1055 llvm::Value *V = Builder.CreateBitCast(This, LTy); 1056 // We need to do a zero check for *p, unless it has NonNullAttr. 
1057 // FIXME: PointerType->hasAttr<NonNullAttr>() 1058 bool CanBeZero = false; 1059 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens())) 1060 if (UO->getOpcode() == UO_Deref) 1061 CanBeZero = true; 1062 if (CanBeZero) { 1063 llvm::BasicBlock *NonZeroBlock = createBasicBlock(); 1064 llvm::BasicBlock *ZeroBlock = createBasicBlock(); 1065 1066 llvm::Value *Zero = llvm::Constant::getNullValue(LTy); 1067 Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero), 1068 NonZeroBlock, ZeroBlock); 1069 EmitBlock(ZeroBlock); 1070 /// Call __cxa_bad_typeid 1071 const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); 1072 const llvm::FunctionType *FTy; 1073 FTy = llvm::FunctionType::get(ResultType, false); 1074 llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid"); 1075 Builder.CreateCall(F)->setDoesNotReturn(); 1076 Builder.CreateUnreachable(); 1077 EmitBlock(NonZeroBlock); 1078 } 1079 V = Builder.CreateLoad(V, "vtable"); 1080 V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL); 1081 V = Builder.CreateLoad(V); 1082 return V; 1083 } 1084 } 1085 return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy); 1086} 1087 1088llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V, 1089 const CXXDynamicCastExpr *DCE) { 1090 QualType SrcTy = DCE->getSubExpr()->getType(); 1091 QualType DestTy = DCE->getTypeAsWritten(); 1092 QualType InnerType = DestTy->getPointeeType(); 1093 1094 const llvm::Type *LTy = ConvertType(DCE->getType()); 1095 1096 bool CanBeZero = false; 1097 bool ToVoid = false; 1098 bool ThrowOnBad = false; 1099 if (DestTy->isPointerType()) { 1100 // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this 1101 CanBeZero = true; 1102 if (InnerType->isVoidType()) 1103 ToVoid = true; 1104 } else { 1105 LTy = LTy->getPointerTo(); 1106 1107 // FIXME: What if exceptions are disabled? 
1108 ThrowOnBad = true; 1109 } 1110 1111 if (SrcTy->isPointerType() || SrcTy->isReferenceType()) 1112 SrcTy = SrcTy->getPointeeType(); 1113 SrcTy = SrcTy.getUnqualifiedType(); 1114 1115 if (DestTy->isPointerType() || DestTy->isReferenceType()) 1116 DestTy = DestTy->getPointeeType(); 1117 DestTy = DestTy.getUnqualifiedType(); 1118 1119 llvm::BasicBlock *ContBlock = createBasicBlock(); 1120 llvm::BasicBlock *NullBlock = 0; 1121 llvm::BasicBlock *NonZeroBlock = 0; 1122 if (CanBeZero) { 1123 NonZeroBlock = createBasicBlock(); 1124 NullBlock = createBasicBlock(); 1125 Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock); 1126 EmitBlock(NonZeroBlock); 1127 } 1128 1129 llvm::BasicBlock *BadCastBlock = 0; 1130 1131 const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType()); 1132 1133 // See if this is a dynamic_cast(void*) 1134 if (ToVoid) { 1135 llvm::Value *This = V; 1136 V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo()); 1137 V = Builder.CreateLoad(V, "vtable"); 1138 V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL); 1139 V = Builder.CreateLoad(V, "offset to top"); 1140 This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext)); 1141 V = Builder.CreateInBoundsGEP(This, V); 1142 V = Builder.CreateBitCast(V, LTy); 1143 } else { 1144 /// Call __dynamic_cast 1145 const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext); 1146 const llvm::FunctionType *FTy; 1147 std::vector<const llvm::Type*> ArgTys; 1148 const llvm::Type *PtrToInt8Ty 1149 = llvm::Type::getInt8Ty(VMContext)->getPointerTo(); 1150 ArgTys.push_back(PtrToInt8Ty); 1151 ArgTys.push_back(PtrToInt8Ty); 1152 ArgTys.push_back(PtrToInt8Ty); 1153 ArgTys.push_back(PtrDiffTy); 1154 FTy = llvm::FunctionType::get(ResultType, ArgTys, false); 1155 1156 // FIXME: Calculate better hint. 
1157 llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL); 1158 1159 assert(SrcTy->isRecordType() && "Src type must be record type!"); 1160 assert(DestTy->isRecordType() && "Dest type must be record type!"); 1161 1162 llvm::Value *SrcArg 1163 = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType()); 1164 llvm::Value *DestArg 1165 = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType()); 1166 1167 V = Builder.CreateBitCast(V, PtrToInt8Ty); 1168 V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"), 1169 V, SrcArg, DestArg, hint); 1170 V = Builder.CreateBitCast(V, LTy); 1171 1172 if (ThrowOnBad) { 1173 BadCastBlock = createBasicBlock(); 1174 Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock); 1175 EmitBlock(BadCastBlock); 1176 /// Invoke __cxa_bad_cast 1177 ResultType = llvm::Type::getVoidTy(VMContext); 1178 const llvm::FunctionType *FBadTy; 1179 FBadTy = llvm::FunctionType::get(ResultType, false); 1180 llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast"); 1181 if (llvm::BasicBlock *InvokeDest = getInvokeDest()) { 1182 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 1183 Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn(); 1184 EmitBlock(Cont); 1185 } else { 1186 // FIXME: Does this ever make sense? 1187 Builder.CreateCall(F)->setDoesNotReturn(); 1188 } 1189 Builder.CreateUnreachable(); 1190 } 1191 } 1192 1193 if (CanBeZero) { 1194 Builder.CreateBr(ContBlock); 1195 EmitBlock(NullBlock); 1196 Builder.CreateBr(ContBlock); 1197 } 1198 EmitBlock(ContBlock); 1199 if (CanBeZero) { 1200 llvm::PHINode *PHI = Builder.CreatePHI(LTy); 1201 PHI->reserveOperandSpace(2); 1202 PHI->addIncoming(V, NonZeroBlock); 1203 PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock); 1204 V = PHI; 1205 } 1206 1207 return V; 1208} 1209