InlineFunction.cpp revision 86099345db95fdce6960ab62fbd9cb0cf96875f7
//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}


/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                          const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call.

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}


/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes
    // in the exception destination block still have entries due to the
    // original invoke instruction.  Eliminate these entries (which might even
    // delete the PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueMap<const Value*, Value*> &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||           // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueMap<const Value*, Value*> VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = Type::getInt8PtrTy(Context);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
                                          I->getName(),
                                          &*Caller->begin()->begin());
        // Emit a memcpy.
        const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy,
                                                       Tys, 3);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (IFI.TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::getInt64Ty(Context),
                                  IFI.TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size,
          ConstantInt::get(Type::getInt32Ty(Context), 1),
          ConstantInt::get(Type::getInt1Ty(Context), 0)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);

        // If we have a call graph, update it.
        if (CallGraph *CG = IFI.CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags.
        MustClearTailCallFlags = true;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller if the
      // StaticAllocas pointer is non-null.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }

      // Now that we inserted the PHI, check to see if it has a single value
      // (e.g. all the entries are the same or undef).  If so, remove the PHI
      // so it doesn't block other optimizations.
      if (Value *V = PHI->hasConstantValue()) {
        PHI->replaceAllUsesWith(V);
        PHI->eraseFromParent();
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes.

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}
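
Usage sketch (editorial addition, not part of the revision above): the CallInst/InvokeInst overloads near the top of the file are the usual client entry points. The hypothetical helper below collects every direct call to a given callee inside a caller and runs each one through llvm::InlineFunction. The name InlineAllCallsTo is invented for illustration, and InlineFunctionInfo is constructed here without a CallGraph or TargetData, which this revision permits; a real pass would supply its analyses so the call graph and StaticAllocas list stay accurate.

#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/Cloning.h"

// InlineAllCallsTo - Illustrative helper: inline every direct call to Callee
// that appears in Caller, one level deep, and return how many sites were
// successfully inlined.
static unsigned InlineAllCallsTo(llvm::Function &Caller,
                                 llvm::Function &Callee) {
  using namespace llvm;

  // Collect the interesting call sites up front; InlineFunction splices and
  // deletes blocks in Caller, so don't walk the function while mutating it.
  SmallVector<CallInst*, 8> Sites;
  for (Function::iterator BB = Caller.begin(), E = Caller.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I)
      if (CallInst *CI = dyn_cast<CallInst>(I))
        if (CI->getCalledFunction() == &Callee)
          Sites.push_back(CI);

  unsigned NumInlined = 0;
  InlineFunctionInfo IFI;              // No CallGraph, no TargetData supplied.
  for (unsigned i = 0, e = Sites.size(); i != e; ++i)
    if (InlineFunction(Sites[i], IFI)) // Deletes Sites[i] on success.
      ++NumInlined;
  return NumInlined;
}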