InlineFunction.cpp revision a7212e58260f6d1ead0c4eec7af400cf6c0d289e
//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(CI), CG, TD);
}
bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(II), CG, TD);
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
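          // Operand 0 of the call is the callee, so the actual arguments
          // start at op_begin()+1.  As a sketch (hypothetical IR), a call
          //   %r = call i32 @f(i32 %x)
          // becomes, after the split and rewrite below:
          //   %r = invoke i32 @f(i32 %x)
          //           to label %r.noexc unwind label %lpad
          // where %lpad stands for the unwind destination of the invoke
          // site being inlined.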
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                               InvokeArgs.begin(), InvokeArgs.end(),
                               CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setParamAttrs(CI->getParamAttrs());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        BranchInst::Create(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->eraseFromParent();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph will remain.
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
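      // (If the cloner constant-folded the call, the ValueMap entry is a
      // Constant rather than an Instruction, so the dyn_cast below fails
      // and no edge is added.)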
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}


// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is a non-tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    isa<CallInst>(TheCall) && !cast<CallInst>(TheCall)->isTailCall();

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  std::vector<ReturnInst*> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
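      // As a sketch (hypothetical IR), a byval argument passed as
      //   call void @f(%struct.S* byval %p)
      // is given an explicit copy in the caller:
      //   %copy = alloca %struct.S
      //   call void @llvm.memcpy.i64(i8* %copy.i8, i8* %p.i8, i64 %sz, i32 1)
      // and the formal argument is remapped to %copy below.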
      if (CalledFunc->paramHasAttr(ArgNo+1, ParamAttr::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (TD) Align = TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                          Caller->begin()->begin());
        // Emit a memcpy.
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy_i64);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(Caller, CalledFunc, FirstNewBlock, ValueMap,
                                   *CG);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
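          // (Keeping constant-sized allocas in the caller's entry block also
          // keeps them visible to passes, such as mem2reg, that only treat
          // entry-block allocas as static stack slots.)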
          Caller->getEntryBlock().getInstList().splice(
              InsertPoint,
              FirstNewBlock->getInstList(),
              AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst::Create(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
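  // (An inlined 'unwind' could only execute if the callee threw, which the
  // 'nounwind' attribute on the call site rules out, so each one is replaced
  // with an 'unreachable' terminator here.)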
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code from the callee's entry block into the calling block,
  // right before the unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}