//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding.  This pass supports two models of exception
// handling lowering, the 'cheap' support and the 'expensive' support.
//
// 'Cheap' exception handling support gives the program the ability to execute
// any program which does not "throw an exception", by turning 'invoke'
// instructions into calls and by turning 'unwind' instructions into calls to
// abort().  If the program does dynamically use the 'unwind' instruction, the
// program will abort.
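//
// For example (an illustrative sketch, not a verbatim test case), the cheap
// model rewrites
//
//   %r = invoke i32 @foo(i32 %x)
//           to label %ok unwind label %lpad
//
// into
//
//   %r = call i32 @foo(i32 %x)
//   br label %ok
//
// while an 'unwind' terminator becomes a tail call to abort() followed by a
// return (returning a null value when the function is non-void).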
//
// 'Expensive' exception handling support gives the full exception handling
// support to the program at the cost of making the 'invoke' instruction
// really expensive.  It basically inserts setjmp/longjmp calls to emulate the
// exception handling as necessary.
//
// Because the 'expensive' support slows down programs a lot, and EH is only
// used for a subset of the programs, it must be specifically enabled by an
// option.
//
// Note that after this pass runs the CFG is not entirely accurate (exceptional
// control flow edges are not correct anymore) so only very simple things
// should be done after the lowerinvoke pass has run (like generation of
// native code).  This should not be used as a general purpose "my
// LLVM-to-LLVM pass doesn't support the invoke instruction yet" lowering
// pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include <csetjmp>
#include <set>
using namespace llvm;

STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumUnwinds, "Number of unwinds replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");

static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
 cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));

namespace {
  class LowerInvoke : public FunctionPass {
    // Used for both models.
    Constant *AbortFn;

    // Used for expensive EH support.
    StructType *JBLinkTy;
    GlobalVariable *JBListHead;
    Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
    bool useExpensiveEHSupport;

    // We peek in TLI to grab the target's jmp_buf size and alignment.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit LowerInvoke(const TargetLowering *tli = NULL,
                         bool useExpensiveEHSupport = ExpensiveEHSupport)
      : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
        TLI(tli) {
      initializeLowerInvokePass(*PassRegistry::getPassRegistry());
    }
    bool doInitialization(Module &M);
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // This is a cluster of orthogonal Transforms.
      AU.addPreserved("mem2reg");
      AU.addPreservedID(LowerSwitchID);
    }

  private:
    bool insertCheapEHSupport(Function &F);
    void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
    void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                AllocaInst *InvokeNum, AllocaInst *StackPtr,
                                SwitchInst *CatchSwitch);
    bool insertExpensiveEHSupport(Function &F);
  };
}

char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
                "Lower invoke and unwind, for unwindless code generators",
                false, false)

char &llvm::LowerInvokePassID = LowerInvoke::ID;

// Public interface to the LowerInvoke pass.
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
  return new LowerInvoke(TLI, ExpensiveEHSupport);
}
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
                                          bool useExpensiveEHSupport) {
  return new LowerInvoke(TLI, useExpensiveEHSupport);
}

// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
  Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
  if (useExpensiveEHSupport) {
    // Insert a type for the linked list of jump buffers.
    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
    JBSize = JBSize ? JBSize : 200;
    Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);

    JBLinkTy = StructType::create(M.getContext(), "llvm.sjljeh.jmpbufty");
    Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
    JBLinkTy->setBody(Elts);
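    // The struct built above corresponds to the following C-style layout
    // (a sketch; the jmp_buf is modeled as an array of JBSize i8 pointers):
    //
    //   struct JBLink {
    //     void *JmpBuf[JBSize];   // buffer passed to llvm.setjmp/llvm.longjmp
    //     struct JBLink *Next;    // previously active buffer in the chain
    //   };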

    Type *PtrJBList = PointerType::getUnqual(JBLinkTy);

    // Now that we've done that, insert the jmpbuf list head global, unless it
    // already exists.
    if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
      JBListHead = new GlobalVariable(M, PtrJBList, false,
                                      GlobalValue::LinkOnceAnyLinkage,
                                      Constant::getNullValue(PtrJBList),
                                      "llvm.sjljeh.jblist");
    }

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

    SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
   // let's return it to _setjmp state
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

    LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
    StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
  }

  // We need the 'abort' function for both models.
  AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
                                  (Type *)0);
  return true;
}

bool LowerInvoke::insertCheapEHSupport(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
      // Insert a normal call instruction...
      CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                           CallArgs, "", II);
      NewCall->takeName(II);
      NewCall->setCallingConv(II->getCallingConv());
      NewCall->setAttributes(II->getAttributes());
      NewCall->setDebugLoc(II->getDebugLoc());
      II->replaceAllUsesWith(NewCall);

      // Insert an unconditional branch to the normal destination.
      BranchInst::Create(II->getNormalDest(), II);

      // Remove any PHI node entries from the exception destination.
      II->getUnwindDest()->removePredecessor(BB);

      // Remove the invoke instruction now.
      BB->getInstList().erase(II);

      ++NumInvokes; Changed = true;
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // Insert a call to abort().
      CallInst::Create(AbortFn, "", UI)->setTailCall();

      // Insert a return instruction.  This really should be a "barrier", as it
      // is unreachable.
      ReturnInst::Create(F.getContext(),
                         F.getReturnType()->isVoidTy() ?
                            0 : Constant::getNullValue(F.getReturnType()), UI);

      // Remove the unwind instruction now.
      BB->getInstList().erase(UI);

      ++NumUnwinds; Changed = true;
    }
  return Changed;
}
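
// For each invoke, the rewrite below emits, in effect (an illustrative
// sketch; N is this invoke's nonzero number):
//
//   invokenum = N;                 // volatile store before the call
//   stackptr  = llvm.stacksave();  // saved so the landing pad can restore it
//   result = callee(args...);      // the invoke becomes a plain call
//   invokenum = 0;                 // reset at the top of the normal dest
//
// and adds a 'case N -> landing pad' entry to the shared catch switch; the
// landing pad begins by reloading stackptr and calling llvm.stackrestore.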

/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
/// specified invoke instruction with a call.
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                         AllocaInst *InvokeNum,
                                         AllocaInst *StackPtr,
                                         SwitchInst *CatchSwitch) {
  ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                            InvokeNo);

  // If the unwind edge has phi nodes, split the edge.
  if (isa<PHINode>(II->getUnwindDest()->begin())) {
    SplitCriticalEdge(II, 1, this);

    // If there are any phi nodes left, they must have a single predecessor.
    while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }
  }

  // Insert a store of the invoke num before the invoke and store zero into the
  // location afterward.
  new StoreInst(InvokeNoC, InvokeNum, true, II);  // volatile

  // Insert a store of the stack ptr before the invoke, so we can restore it
  // later in the exception case.
  CallInst* StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
  new StoreInst(StackSaveRet, StackPtr, true, II); // volatile

  BasicBlock::iterator NI = II->getNormalDest()->getFirstInsertionPt();
  // nonvolatile.
  new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
                InvokeNum, false, NI);

  Instruction* StackPtrLoad =
    new LoadInst(StackPtr, "stackptr.restore", true,
                 II->getUnwindDest()->getFirstInsertionPt());
  CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);

  // Add a switch case to our unwind block.
  CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());

  // Insert a normal call instruction.
  SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Replace the invoke with an uncond branch.
  BranchInst::Create(II->getNormalDest(), NewCall->getParent());
  II->eraseFromParent();
}

/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second) return; // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}
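
// Why the spilling below is needed (an illustrative sketch): after a longjmp,
// C makes no guarantee about the value of a non-volatile local modified since
// the corresponding setjmp, e.g.
//
//   int x = 0;
//   if (!setjmp(buf)) { x = 1; may_longjmp(); }
//   /* here x may legitimately be observed as either 0 or 1 */
//
// Demoting each value that is live across an unwind edge into a stack slot is
// the IR-level analogue of declaring it 'volatile'.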

// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invokes.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()->begin()) &&
           !isa<PHINode>(II->getUnwindDest()->begin()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live out of the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently.  We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI =
        ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the argument.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical.  BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the argument.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion.  However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge.  If we find a use live across an invoke edge, create an
      // alloca and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
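
// The overall shape of the expensive lowering, as a C-like pseudocode sketch
// (names invented for exposition):
//
//   entry:
//     jblink.Next = *jblist_head;             // push our buffer on the chain
//     *jblist_head = &jblink;
//     invokenum = 0;
//     if (llvm.setjmp(&jblink.JmpBuf) != 0)   // a longjmp landed here
//       switch (invokenum) {
//         case 1:  goto lpad_1;  /* ...one case per rewritten invoke... */
//         default: goto dounwind;             // not ours: keep unwinding
//       }
//     ... original code, with each invoke rewritten into a call ...
//
//   on every return: *jblist_head = jblink.Next;  // pop our buffer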

bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;
  UnreachableInst* UnreachablePlaceholder = 0;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }

  if (Unwinds.empty() && Invokes.empty()) return false;

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  // TODO: This is not an optimal way to do this.  In particular, this always
  // inserts setjmp calls into the entries of functions with invoke
  // instructions, even though there are possibly paths through the function
  // that do not execute any invokes.  For example, for functions with early
  // exits, e.g. the 'addMove' method in hexxagon, it would be nice to not
  // have to do the setjmp stuff on the early exit path.  This requires a bit
  // of dataflow, but would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes.  After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that
    // are live across unwind edges.  Each value that is live across an unwind
    // edge we spill into a stack location, guaranteeing that there is nothing
    // live across the unwind edge.  This process also splits all critical
    // edges coming out of invokes.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump
    // buffer that needs to be restored on all exits from the function.  This
    // is an alloca because the value needs to be live across invokes.
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "OldBuf",
                                             EntryBB->getTerminator());
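    // With the JBLink layout built in doInitialization, the indices {0, 1}
    // above select the struct's second field, so OldJmpBufPtr points at our
    // link's 'Next' slot, where the previous list head is saved just below.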

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block.  The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
      BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke; this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing.  For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum", EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value.  By
    // default, we go to a block that just does an unwind (which is the
    // correct action for a standard call).  We insert an unreachable
    // instruction here and modify the block to jump to the correct unwinding
    // pad later.
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    UnreachablePlaceholder = new UnreachableInst(F.getContext(), UnwindBB);

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                                Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up, rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one unwind.

  // Create three new blocks, the block to load the jmpbuf ptr and compare
  // against null, the block to do the longjmp, and the error block for if it
  // is null.  Add them at the end of the function because they are not hot.
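  // Roughly, the handler emitted below corresponds to (an illustrative
  // sketch):
  //
  //   dounwind:
  //     buf = the head we saved on entry, if this function pushed a buffer
  //           (restoring it as *jblist_head); otherwise *jblist_head itself;
  //     if (buf == null) goto unwinderror;
  //   unwind:
  //     llvm.longjmp(&buf->JmpBuf, 1);   // resume at the enclosing setjmp
  //   unwinderror:
  //     abort();                         // 'unwind' with no active catch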
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                 "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList; if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, Idx, "JmpBuf", UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
                           Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, Idx, "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort().
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace all unwinds with a branch to the unwind handler.
  for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
    BranchInst::Create(UnwindHandler, Unwinds[i]);
    Unwinds[i]->eraseFromParent();
  }

  // Replace the inserted unreachable with a branch to the unwind handler.
  if (UnreachablePlaceholder) {
    BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
    UnreachablePlaceholder->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new
      // value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}

bool LowerInvoke::runOnFunction(Function &F) {
  if (useExpensiveEHSupport)
    return insertExpensiveEHSupport(F);
  else
    return insertCheapEHSupport(F);
}