Inliner.cpp revision f9c3b228e5579e0d2a9cd05a2191fe17b4c58b23
//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to perform inlining without
// missing any calls and while keeping the call graph up to date.  The
// decisions about which calls are profitable to inline are implemented
// elsewhere.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
        cl::desc("Control the amount of inlining to perform (default = 225)"));

Inliner::Inliner(void *ID)
  : CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}

Inliner::Inliner(void *ID, int Threshold)
  : CallGraphSCCPass(ID), InlineThreshold(Threshold) {}
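
// Note: Inliner is an abstract CallGraphSCCPass; a concrete inliner supplies
// the cost model by overriding getInlineCost, getInlineFudgeFactor and
// resetCachedCostInfo.  Roughly (a sketch only; see the in-tree subclasses
// such as SimpleInliner in InlineSimple.cpp and AlwaysInliner in
// InlineAlways.cpp for real implementations):
//
//   struct MyInliner : public Inliner {
//     static char ID;
//     MyInliner() : Inliner(&ID) {}
//     InlineCost getInlineCost(CallSite CS) { /* consult a cost model */ }
//     float getInlineFudgeFactor(CallSite CS) { return 1.0f; }
//     void resetCachedCostInfo(Function *Caller) { /* drop cached costs */ }
//   };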

/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph.  If the derived class implements this method, it should
/// always explicitly call the implementation here.
void Inliner::getAnalysisUsage(AnalysisUsage &Info) const {
  CallGraphSCCPass::getAnalysisUsage(Info);
}


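/// InlinedArrayAllocasTy - Maps an array type to the allocas of that type
/// that inlining has already introduced into the current caller.  It is used
/// below to merge allocas with known-disjoint lifetimes.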
typedef DenseMap<const ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;

/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site, we attempt to reuse already available allocas, or we
/// add any new allocas to the set when they cannot be merged.
static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
                                 const TargetData *TD,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  SmallVector<AllocaInst*, 16> StaticAllocas;
  if (!InlineFunction(CS, &CG, TD, &StaticAllocas))
    return false;

  // If the inlined function had a higher stack protection level than the
  // calling function, then bump up the caller's stack protection level.
  if (Callee->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtectReq);
  else if (Callee->hasFnAttr(Attribute::StackProtect) &&
           !Caller->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtect);


  // Look at all of the allocas that we inlined through this call site.  If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs.  One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type.  These are usually not promoted because someone is using a
  // variable index into them.  These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all.  This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
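  // As a concrete illustration (a sketch only, not taken from a real test
  // case): if a callee containing
  //     %buf = alloca [4 x i32]
  // is inlined into the same caller through two different call sites, the
  // second copy of %buf is RAUW'd onto the first and erased, so the caller
  // ends up with one [4 x i32] stack slot instead of two.
  //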
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    const ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (ATy == 0 || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
      AllocaInst *AvailableAlloca = AllocasForType[i];

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca))
        continue;
      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI);

      AI->replaceAllUsesWith(AvailableAlloca);
      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }

  return true;
}

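/// getInlineThreshold - Return the inline cost threshold to use for calls
/// into the given caller.  If the caller is marked OptimizeForSize and
/// -inline-threshold was not given explicitly on the command line, a reduced
/// threshold of 75 is used; otherwise the pass's configured threshold applies.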
unsigned Inliner::getInlineThreshold(Function* Caller) const {
  if (Caller && !Caller->isDeclaration() &&
      Caller->hasFnAttr(Attribute::OptimizeForSize) &&
      InlineLimit.getNumOccurrences() == 0)
    return 75;
  else
    return InlineThreshold;
}

/// shouldInline - Return true if the inliner should attempt to inline
/// at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
  InlineCost IC = getInlineCost(CS);

  if (IC.isAlways()) {
    DEBUG(dbgs() << "    Inlining: cost=always"
          << ", Call: " << *CS.getInstruction() << "\n");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << "    NOT Inlining: cost=never"
          << ", Call: " << *CS.getInstruction() << "\n");
    return false;
  }

  int Cost = IC.getValue();
  Function *Caller = CS.getCaller();
  int CurrentThreshold = getInlineThreshold(Caller);
  float FudgeFactor = getInlineFudgeFactor(CS);
  if (Cost >= (int)(CurrentThreshold * FudgeFactor)) {
    DEBUG(dbgs() << "    NOT Inlining: cost=" << Cost
          << ", Call: " << *CS.getInstruction() << "\n");
    return false;
  }

  // Try to detect the case where the current inlining candidate caller
  // (call it B) is a static function and is an inlining candidate elsewhere,
  // and the current candidate callee (call it C) is large enough that
  // inlining it into B would make B too big to inline later.  In these
  // circumstances it may be best not to inline C into B, but to inline B
  // into its callers.
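  //
  // For example (illustrative numbers only, not derived from the cost model):
  // suppose inlining C into B costs 200 against a threshold of 225, so it
  // passes the check above.  If B's only call site currently costs 100 to
  // inline (also under 225), but 100 + 200 minus the saved call penalty is
  // over 225, then inlining C would make B uninlinable at that site.  Since
  // that secondary cost (100) is less than the 200 we would spend here, we
  // decline to inline C into B.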
  if (Caller->hasLocalLinkage()) {
    int TotalSecondaryCost = 0;
    bool outerCallsFound = false;
    bool allOuterCallsWillBeInlined = true;
    bool someOuterCallWouldNotBeInlined = false;
    for (Value::use_iterator I = Caller->use_begin(), E = Caller->use_end();
         I != E; ++I) {
      CallSite CS2 = CallSite::get(*I);

      // If this isn't a call to Caller (it could be some other sort
      // of reference) skip it.
      if (CS2.getInstruction() == 0 || CS2.getCalledFunction() != Caller)
        continue;

      InlineCost IC2 = getInlineCost(CS2);
      if (IC2.isNever())
        allOuterCallsWillBeInlined = false;
      if (IC2.isAlways() || IC2.isNever())
        continue;

      outerCallsFound = true;
      int Cost2 = IC2.getValue();
      Function *Caller2 = CS2.getCaller();
      int CurrentThreshold2 = getInlineThreshold(Caller2);
      float FudgeFactor2 = getInlineFudgeFactor(CS2);

      if (Cost2 >= (int)(CurrentThreshold2 * FudgeFactor2))
        allOuterCallsWillBeInlined = false;

      // See if we have this case.  We subtract off the penalty
      // for the call instruction, which we would be deleting.
      if (Cost2 < (int)(CurrentThreshold2 * FudgeFactor2) &&
          Cost2 + Cost - (InlineConstants::CallPenalty + 1) >=
                (int)(CurrentThreshold2 * FudgeFactor2)) {
        someOuterCallWouldNotBeInlined = true;
        TotalSecondaryCost += Cost2;
      }
    }
    // If all outer calls to Caller would get inlined, the cost for the last
    // one is set very low by getInlineCost, in anticipation that Caller will
    // be removed entirely.  We did not account for this above unless there
    // is only one caller of Caller.
    if (allOuterCallsWillBeInlined && Caller->use_begin() != Caller->use_end())
      TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;

    if (outerCallsFound && someOuterCallWouldNotBeInlined &&
        TotalSecondaryCost < Cost) {
      DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction() <<
           " Cost = " << Cost <<
           ", outer Cost = " << TotalSecondaryCost << '\n');
      return false;
    }
  }

  DEBUG(dbgs() << "    Inlining: cost=" << Cost
        << ", Call: " << *CS.getInstruction() << '\n');
  return true;
}

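/// runOnSCC - Inline calls within the current SCC.  The pass gathers every
/// call site in the SCC up front (with calls to other SCC members moved to
/// the end of the list), then repeatedly walks the list: trivially dead calls
/// are deleted, profitable calls are inlined, and local functions whose last
/// use disappears are removed from the module.  The walk repeats until no
/// further change is made, since inlining can turn indirect calls into direct
/// calls.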
bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  const TargetData *TD = getAnalysisIfAvailable<TargetData>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
    Function *F = SCC[i]->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<CallSite, 16> CallSites;

  for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
    Function *F = SCC[i]->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS = CallSite::get(I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (CS.getInstruction() == 0 || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(CS);
      }
  }

315  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
316
317  // Now that we have all of the call sites, move the ones to functions in the
318  // current SCC to the end of the list.
319  unsigned FirstCallInSCC = CallSites.size();
320  for (unsigned i = 0; i < FirstCallInSCC; ++i)
321    if (Function *F = CallSites[i].getCalledFunction())
322      if (SCCFunctions.count(F))
323        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
324
325
  InlinedArrayAllocasTy InlinedArrayAllocas;

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi];

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
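      // (For instance -- a hypothetical illustration -- IPSCCP may prove that
      // a readonly callee always returns the same constant, rewrite the users
      // of the return value, and leave behind a call whose result is unused.)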
      if (isInstructionTriviallyDead(CS.getInstruction())) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Caller to Callee.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (Callee == 0 || Callee->isDeclaration()) continue;

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS))
          continue;

        // Attempt to inline the function...
        if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas))
          continue;
        ++NumInlined;
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, as this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        resetCachedCostInfo(Callee);

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove any cached cost info for this caller, as inlining the
      // callee has increased the size of the caller (which may be the
      // same as the callee).
      resetCachedCostInfo(Caller);

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.size() == 1) {
        std::swap(CallSites[CSi], CallSites.back());
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG);
}

/// removeDeadFunctions - Remove dead functions that are not included in the
/// DNR (Do Not Remove) list.
bool Inliner::removeDeadFunctions(CallGraph &CG,
                                  SmallPtrSet<const Function *, 16> *DNR) {
  SmallPtrSet<CallGraphNode*, 16> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    if (CGN->getFunction() == 0)
      continue;

    Function *F = CGN->getFunction();

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (DNR && DNR->count(F))
      continue;
    if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
        !F->hasAvailableExternallyLinkage())
      continue;
    if (!F->use_empty())
      continue;


    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Mark the function's call graph node for removal; the actual deletion
    // happens below, once iteration over the call graph is complete.
    FunctionsToRemove.insert(CGN);
  }


  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable set
  // here to do this; the order in which the functions are deleted doesn't
  // matter.
  bool Changed = false;
  for (SmallPtrSet<CallGraphNode*, 16>::iterator I = FunctionsToRemove.begin(),
       E = FunctionsToRemove.end(); I != E; ++I) {
    resetCachedCostInfo((*I)->getFunction());
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
    Changed = true;
  }

  return Changed;
}