CodeGenFunction.cpp revision 745da3a5bb4ea35f93f50301e7fbbb7d78d3b6bb
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry);

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getCXXABI().getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isObjCObjectType();
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
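  // (A sketch of the common case: a body ending in a lone "return X;" emits
  // a single unconditional branch to the return block and then clears the
  // insertion point, so we land here and can fold that branch away.)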
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, RethrowBlock.getBlock());
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
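/// (Fn is expected to be "__cyg_profile_func_enter" or
/// "__cyg_profile_func_exit", matching the calls made from StartFunction and
/// FinishFunction.)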
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(
    llvm::Type::getVoidTy(VMContext),
    ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
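    // (The indirect convention passes a hidden pointer to the return slot as
    // the first IR argument, so taking arg_begin() below picks up that
    // "sret" parameter.)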
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
        if (!Call->doesNotThrow())
          return;
  F->setDoesNotThrow(true);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code; consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0. If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
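      // (Recall ConstantFoldsToSimpleInteger's encoding: 1 means constant
      // true, -1 means constant false, and 0 means not foldable or contains
      // a label, so -1 is the interesting value for the "0 || X" case.)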
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  const llvm::Type *BP =
    llvm::Type::getInt8PtrTy(VMContext, DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
  uint64_t Size = TypeInfo.first;
  unsigned Align = TypeInfo.second;

  // Don't bother emitting a zero-byte memset.
  if (Size == 0)
    return;

  llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
  llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
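  // (For example, under the Itanium C++ ABI a null pointer to data member is
  // represented as -1, not 0, so an all-zero bit pattern would read back as
  // a valid, non-null member pointer.)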
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    // FIXME: variable-size types?

    // Get and call the appropriate llvm.memcpy overload.
    llvm::Constant *Memcpy =
      CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
    Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
                        /*volatile*/ Builder.getFalse());
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                      Builder.getInt8(0),
                      SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    // unknown size indication requires no size computation.
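    // (This arises for the C99 "[*]" form, e.g. a parameter declared as
    // "int a[*]" in a function prototype, where the VLA carries no size
    // expression at all.)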
    if (!VAT->getSizeExpr())
      return 0;
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
                                       EHCleanupScope &Scope) {
  assert(Scope.isEHCleanup());
  llvm::BasicBlock *Entry = Scope.getEHBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("eh.cleanup");
    Scope.setEHBlock(Entry);
  }
  return Entry;
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
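/// (In particular, the entry is only merged when its sole predecessor ends
/// in an unconditional branch to it, so the instruction lists can simply be
/// spliced together.)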
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        bool ForEH,
                        llvm::Value *ActiveFlag) {
  // EH cleanups always occur within a terminate scope.
  if (ForEH) CGF.EHStack.pushTerminate();

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = 0;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, ForEH);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (ForEH) CGF.EHStack.popTerminate();
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

  // Check whether we need an EH cleanup. This is only true if we've
  // generated a lazy EH cleanup block.
  bool RequiresEHCleanup = Scope.hasEHBranches();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0 && IsActive);

  // As a kind of crazy internal case, branch-through fall-throughs
  // leave the insertion point set to the end of the last cleanup.
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  assert(!HasPrebranchedFallthrough || RequiresNormalCleanup || !IsActive);
  assert(!HasPrebranchedFallthrough ||
         (Scope.isNormalCleanup() && Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  // Even if we don't need the normal cleanup, we might still have
  // prebranched fallthrough to worry about.
  if (!RequiresNormalCleanup && HasPrebranchedFallthrough) {
    assert(!IsActive);

    llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();

    // If we're branching through this cleanup, just forward the
    // prebranched fallthrough to the next cleanup, leaving the insert
    // point in the old block.
    if (FallthroughIsBranchThrough) {
      EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
      llvm::BasicBlock *EnclosingEntry =
        CreateNormalEntry(*this, cast<EHCleanupScope>(S));

      ForwardPrebranchedFallthrough(FallthroughSource,
                                    NormalEntry, EnclosingEntry);
      assert(NormalEntry->use_empty() &&
             "uses of entry remain after forwarding?");
      delete NormalEntry;

    // Otherwise, we're branching out; just emit the next block.
    } else {
      EmitBlock(NormalEntry);
      SimplifyCleanupEntry(*this, NormalEntry);
    }
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out. Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  // We want to emit the EH cleanup after the normal cleanup, but go
  // ahead and do the setup for the EH cleanup while the scope is still
  // alive.
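  // (The setup must happen now because the branch-after/branch-through data
  // consulted below lives in the EHCleanupScope itself, which popCleanup()
  // destroys; the cleanup callback was copied out into CleanupBuffer above
  // for the same reason.)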
  llvm::BasicBlock *EHEntry = 0;
  llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
  if (RequiresEHCleanup) {
    EHEntry = CreateEHEntry(*this, Scope);

    // Figure out the branch-through dest if necessary.
    llvm::BasicBlock *EHBranchThroughDest = 0;
    if (Scope.hasEHBranchThroughs()) {
      assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
      EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
      EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
    }

    // If we have exactly one branch-after and no branch-throughs, we
    // can dispatch it without a switch.
    if (!Scope.hasEHBranchThroughs() &&
        Scope.getNumEHBranchAfters() == 1) {
      assert(!EHBranchThroughDest);

      // TODO: remove the spurious eh.cleanup.dest stores if this edge
      // never went through any switches.
      llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
      EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));

    // Otherwise, if we have any branch-afters, we need a switch.
    } else if (Scope.getNumEHBranchAfters()) {
      // The default of the switch belongs to the branch-throughs if
      // they exist.
      llvm::BasicBlock *Default =
        (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());

      const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();

      llvm::LoadInst *Load =
        new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
      llvm::SwitchInst *Switch =
        llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

      EHInstsToAppend.push_back(Load);
      EHInstsToAppend.push_back(Switch);

      for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
        Switch->addCase(Scope.getEHBranchAfterIndex(I),
                        Scope.getEHBranchAfterBlock(I));

    // Otherwise, we have only branch-throughs; jump to the next EH
    // cleanup.
    } else {
      assert(EHBranchThroughDest);
      EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
    }
  }

  if (!RequiresNormalCleanup) {
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      // Fixups can cause us to optimistically create a normal block,
      // only to later have no real uses for it. Just delete it in
      // this case.
      // TODO: we can potentially simplify all the uses after this.
      if (Scope.getNormalBlock()) {
        Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
        delete Scope.getNormalBlock();
      }

      EHStack.popCleanup();

      EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I. Set up the fallthrough edge in.

      // If there's a fallthrough, we need to store the cleanup
      // destination index. For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, clear the IP if we don't have fallthrough because
      // the cleanup is inactive. We don't need to save it because
      // it's still just FallthroughSource.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        Builder.ClearInsertionPoint();
      }

      // II. Emit the entry block. This implicitly branches to it if
      // we have fallthrough. All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III. Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV. Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V. Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but shouldn't branch to
      // the cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        assert(!IsActive);

        // If we have a prebranched fallthrough, that needs to be
        // forwarded to the right block.
        if (HasPrebranchedFallthrough) {
          llvm::BasicBlock *Next;
          if (FallthroughIsBranchThrough) {
            Next = BranchThroughDest;
            assert(!FallthroughDest);
          } else {
            Next = FallthroughDest;
          }

          ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
        }
        Builder.SetInsertPoint(FallthroughSource);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI. Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);
    EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);

    // Append the prepared cleanup prologue from above.
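    // (The load/branch/switch collected in EHInstsToAppend were created
    // without a parent block; pushing them onto EHExit here is what finally
    // attaches them to the function.)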
    llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
    for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
      EHExit->getInstList().push_back(EHInstsToAppend[I]);

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope. The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it. If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
  // We should never get invalid scope depths for an UnwindDest; that
  // implies that the destination wasn't set up correctly.
  assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active cleanup.
  EHScopeStack::stable_iterator
    InnermostCleanup = EHStack.getInnermostActiveEHCleanup();

  // If the destination is in the same EH cleanup scope as us, we
  // don't need to thread through anything.
  if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
    Builder.ClearInsertionPoint();
    return;
  }
  assert(InnermostCleanup != EHStack.stable_end());

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
    BI->setSuccessor(0, CreateEHEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  for (EHScopeStack::stable_iterator
         I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
    assert(E.strictlyEncloses(I));
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
    assert(Scope.isEHCleanup());
    I = Scope.getEnclosingEHCleanup();

    // If this is the last cleanup we're propagating through, add this
    // as a branch-after.
    if (I == E) {
      Scope.addEHBranchAfter(Index, Dest.getBlock());
      break;
    }

    // Otherwise, add it as a branch-through. If this isn't new
    // information, all the rest of the work has been done before.
    if (!Scope.addEHBranchThrough(Dest.getBlock()))
      break;
  }

  Builder.ClearInsertionPoint();
}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set or if we've
    // already treated it.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry. This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == 0) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator C) {
  // If we needed an EH block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostEHCleanup(); I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getEHBlock()) return true;
    I = S.getEnclosingEHCleanup();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state. Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
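/// (Sketch of the mechanism: the flag is an i1 alloca, initialized below to
/// the cleanup's state prior to this point and stored with the new state, so
/// paths emitted while the state was mixed can test it at runtime.)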
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t Kind) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup, because
  // we have to assume that the current location doesn't necessarily
  // dominate all future uses of the cleanup.
  bool NeedFlag = (Kind == ForActivation);

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
    Scope.setTestFlagInNormalCleanup();
    NeedFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
    Scope.setTestFlagInEHCleanup();
    NeedFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!NeedFlag) return;

  llvm::AllocaInst *Var = Scope.getActiveFlag();
  if (!Var) {
    Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(Var);

    // Initialize to true or false depending on whether it was
    // active up to this point.
    CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation);

  Scope.setActive(false);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
  if (!EHCleanupDest)
    EHCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
  return EHCleanupDest;
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::ConstantInt *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
}