InlineFunction.cpp revision 517576d6f96a0acde9bab79553d89f4ceba20cf6
//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(CI), CG, TD);
}
bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(II), CG, TD);
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo,
                                CallGraph *CG) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from this invoke, then remove the
  // entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
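          // splitBasicBlock moves CI and everything after it into the new
          // block and terminates the old block with an unconditional branch
          // to it; that branch is what the invoke created below replaces.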
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                               InvokeArgs.begin(), InvokeArgs.end(),
                               CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Update the callgraph.
          if (CG) {
            // We should be able to do this:
            //   (*CG)[Caller]->replaceCallSite(CI, II);
            // but that fails if the old call site isn't in the call graph,
            // which, because of LLVM bug 3601, it sometimes isn't.
            CallGraphNode *CGN = (*CG)[Caller];
            for (CallGraphNode::iterator NI = CGN->begin(), NE = CGN->end();
                 NI != NE; ++NI) {
              if (NI->first == CI) {
                NI->first = II;
                break;
              }
            }
          }

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        BranchInst::Create(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->eraseFromParent();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
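/// ValueMap maps each value of the original callee to its clone in the
/// caller; call sites that were pruned away or constant folded during cloning
/// have no instruction mapping, so no edge is copied for them.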
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// findFnRegionEndMarker - This is a utility routine that is used by
/// InlineFunction.  Return the llvm.dbg.region.end intrinsic that corresponds
/// to the llvm.dbg.func.start of the function F, or NULL if there is none.
static const DbgRegionEndInst *findFnRegionEndMarker(const Function *F) {
  GlobalVariable *FnStart = NULL;
  const DbgRegionEndInst *FnEnd = NULL;
  for (Function::const_iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (BasicBlock::const_iterator BI = FI->begin(), BE = FI->end(); BI != BE;
         ++BI) {
      if (FnStart == NULL) {
        if (const DbgFuncStartInst *FSI = dyn_cast<DbgFuncStartInst>(BI)) {
          DISubprogram SP(cast<GlobalVariable>(FSI->getSubprogram()));
          assert(!SP.isNull() && "Invalid llvm.dbg.func.start");
          if (SP.describes(F))
            FnStart = SP.getGV();
        }
      } else {
        if (const DbgRegionEndInst *REI = dyn_cast<DbgRegionEndInst>(BI))
          if (REI->getContext() == FnStart)
            FnEnd = REI;
      }
    }
  return FnEnd;
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
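// If a callgraph CG is supplied, it is updated to reflect the call edges
// added and removed by the inlining.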
238// 239bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) { 240 Instruction *TheCall = CS.getInstruction(); 241 assert(TheCall->getParent() && TheCall->getParent()->getParent() && 242 "Instruction not in function!"); 243 244 const Function *CalledFunc = CS.getCalledFunction(); 245 if (CalledFunc == 0 || // Can't inline external function or indirect 246 CalledFunc->isDeclaration() || // call, or call to a vararg function! 247 CalledFunc->getFunctionType()->isVarArg()) return false; 248 249 250 // If the call to the callee is not a tail call, we must clear the 'tail' 251 // flags on any calls that we inline. 252 bool MustClearTailCallFlags = 253 !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall()); 254 255 // If the call to the callee cannot throw, set the 'nounwind' flag on any 256 // calls that we inline. 257 bool MarkNoUnwind = CS.doesNotThrow(); 258 259 BasicBlock *OrigBB = TheCall->getParent(); 260 Function *Caller = OrigBB->getParent(); 261 262 // GC poses two hazards to inlining, which only occur when the callee has GC: 263 // 1. If the caller has no GC, then the callee's GC must be propagated to the 264 // caller. 265 // 2. If the caller has a differing GC, it is invalid to inline. 266 if (CalledFunc->hasGC()) { 267 if (!Caller->hasGC()) 268 Caller->setGC(CalledFunc->getGC()); 269 else if (CalledFunc->getGC() != Caller->getGC()) 270 return false; 271 } 272 273 // Get an iterator to the last basic block in the function, which will have 274 // the new function inlined after it. 275 // 276 Function::iterator LastBlock = &Caller->back(); 277 278 // Make sure to capture all of the return instructions from the cloned 279 // function. 280 std::vector<ReturnInst*> Returns; 281 ClonedCodeInfo InlinedFunctionInfo; 282 Function::iterator FirstNewBlock; 283 284 { // Scope to destroy ValueMap after cloning. 285 DenseMap<const Value*, Value*> ValueMap; 286 287 assert(CalledFunc->arg_size() == CS.arg_size() && 288 "No varargs calls can be inlined!"); 289 290 // Calculate the vector of arguments to pass into the function cloner, which 291 // matches up the formal to the actual argument values. 292 CallSite::arg_iterator AI = CS.arg_begin(); 293 unsigned ArgNo = 0; 294 for (Function::const_arg_iterator I = CalledFunc->arg_begin(), 295 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) { 296 Value *ActualArg = *AI; 297 298 // When byval arguments actually inlined, we need to make the copy implied 299 // by them explicit. However, we don't do this if the callee is readonly 300 // or readnone, because the copy would be unneeded: the callee doesn't 301 // modify the struct. 302 if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) && 303 !CalledFunc->onlyReadsMemory()) { 304 const Type *AggTy = cast<PointerType>(I->getType())->getElementType(); 305 const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty); 306 307 // Create the alloca. If we have TargetData, use nice alignment. 308 unsigned Align = 1; 309 if (TD) Align = TD->getPrefTypeAlignment(AggTy); 310 Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(), 311 Caller->begin()->begin()); 312 // Emit a memcpy. 
        const Type *Tys[] = { Type::Int64Ty };
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy,
                                                       Tys, 1);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // Adjust llvm.dbg.region.end.  If the CalledFunc has a region end marker
    // then clone that marker after the next stop point at the call site.  The
    // function body cloner does not clone the original region end marker from
    // the CalledFunc.  This ensures that the inlined function's scope ends at
    // the right place.
    const DbgRegionEndInst *DREI = findFnRegionEndMarker(CalledFunc);
    if (DREI) {
      for (BasicBlock::iterator BI = TheCall,
           BE = TheCall->getParent()->end(); BI != BE; ++BI) {
        if (DbgStopPointInst *DSPI = dyn_cast<DbgStopPointInst>(BI)) {
          if (DbgRegionEndInst *NewDREI =
                dyn_cast<DbgRegionEndInst>(DREI->clone()))
            NewDREI->insertAfter(DSPI);
          break;
        }
      }
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
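          // I already points at the instruction after AI here, so once the
          // scan below stops, [AI, I) is the contiguous run of static allocas
          // to transfer.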
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
          Caller->getEntryBlock().getInstList().splice(
              InsertPoint,
              FirstNewBlock->getInstList(),
              AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave    = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN    = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst::Create(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
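  // Replace each such unwind with an 'unreachable' terminator so that later
  // passes can delete the now-dead code.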
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo, CG);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge point and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code from the callee's entry block into the calling block,
  // right before the unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}