InlineFunction.cpp revision 8833ef03b9ceaa52063116819fff8b3d16fd8933
//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; //< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; //< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  //< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   //< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
        CallerLPad(0), InnerEHValuesPHI(0) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
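/// (Currently both return paths below return false, so callers never actually
/// skip a block.)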
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    if (!CI || CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite. If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls) {
    // Now that everything is happy, we have one final detail. The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction. Eliminate these entries (which might even delete the
    // PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  InvokeInliningInfo Invoke(II);

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
        // Honor a request to skip the next block.
        ++BB;
        continue;
      }

    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
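/// The copy is elided when the callee is known to only read the memory and the
/// incoming pointer is already sufficiently aligned.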
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  LLVMContext &Context = Arg->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca. If we have TargetData, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  // Emit a memcpy.
  Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy,
                                                 Tys);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
       ++UI) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
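// Lifetime intrinsics operate on i8*, so bitcasts of the alloca to i8* are
// scanned as well.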
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
       ++I) {
    if (I->getType() != Int8PtrTy) continue;
    if (I->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(*I))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to recursively
/// update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() ||  // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = 0;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (!IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      builder.CreateLifetimeStart(AI);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code. Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}