Inliner.cpp revision 45de584b4f82fbfb9cb9c50bc1fc08931b534308
//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph.  The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
        cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
              cl::desc("Threshold for inlining functions with inline hint"));

// Threshold to use when optsize is specified (and there is no
// -inline-threshold given on the command line).
const int OptSizeThreshold = 75;

Inliner::Inliner(char &ID)
  : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}

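// Note: an explicit -inline-threshold on the command line takes precedence
// over the Threshold value a derived pass supplies to this constructor.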
Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
  : CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ?
                                          InlineLimit : Threshold),
    InsertLifetime(InsertLifetime) {}

/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph.  If the derived class implements this method, it should
/// always explicitly call the implementation here.
void Inliner::getAnalysisUsage(AnalysisUsage &Info) const {
  CallGraphSCCPass::getAnalysisUsage(Info);
}


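/// InlinedArrayAllocasTy - Maps the outermost array type of an alloca to the
/// list of allocas of that type which were introduced by earlier inlining into
/// the current caller and are therefore available for reuse.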
typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;

/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site, we attempt to reuse already-available allocas or add
/// any new allocas to the set if not possible.
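///
/// InlineHistory is the inline history ID of this call site (-1 for call sites
/// that were present before any inlining in this SCC); alloca merging is only
/// attempted for those top-level call sites.  InsertLifetime is forwarded to
/// InlineFunction, which uses it to decide whether to insert lifetime markers
/// for the inlined allocas.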
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas,
                                 int InlineHistory, bool InsertLifetime) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI, InsertLifetime))
    return false;

  // If the inlined function had a higher stack protection level than the
  // calling function, then bump up the caller's stack protection level.
  if (Callee->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtectReq);
  else if (Callee->hasFnAttr(Attribute::StackProtect) &&
           !Caller->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtect);

  // Look at all of the allocas that we inlined through this call site.  If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs.  One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type.  These are usually not promoted because someone is using a
  // variable index into them.  These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all.  This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site.  For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.  Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint.  We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas, but this isn't likely to be a significant win.
  if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
    return true;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (ATy == 0 || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
      AllocaInst *AvailableAlloca = AllocasForType[i];

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca))
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
                   << *AvailableAlloca << '\n');

      AI->replaceAllUsesWith(AvailableAlloca);
      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
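      // Null out the entry so we don't keep a dangling pointer to the alloca
      // we just erased.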
      IFI.StaticAllocas[AllocaNo] = 0;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }

  return true;
}

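/// getInlineThreshold - Compute the threshold to use for this call site.  The
/// base value comes from -inline-threshold (or the pass's default); it is
/// lowered when the caller is marked optsize and no explicit threshold was
/// given, and raised when the callee is marked inlinehint.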
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int thres = InlineThreshold;

  // Listen to optsize when -inline-threshold is not given.
  Function *Caller = CS.getCaller();
  if (Caller && !Caller->isDeclaration() &&
      Caller->hasFnAttr(Attribute::OptimizeForSize) &&
      InlineLimit.getNumOccurrences() == 0)
    thres = OptSizeThreshold;

  // Listen to inlinehint when it would increase the threshold.
  Function *Callee = CS.getCalledFunction();
  if (HintThreshold > thres && Callee && !Callee->isDeclaration() &&
      Callee->hasFnAttr(Attribute::InlineHint))
    thres = HintThreshold;

  return thres;
}

/// shouldInline - Return true if the inliner should attempt to inline
/// at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
  InlineCost IC = getInlineCost(CS);

  if (IC.isAlways()) {
    DEBUG(dbgs() << "    Inlining: cost=always"
          << ", Call: " << *CS.getInstruction() << "\n");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << "    NOT Inlining: cost=never"
          << ", Call: " << *CS.getInstruction() << "\n");
    return false;
  }

  Function *Caller = CS.getCaller();
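  // InlineCost converts to false when the computed cost is not below the
  // threshold, i.e. when this call site is too expensive to inline.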
  if (!IC) {
    DEBUG(dbgs() << "    NOT Inlining: cost=" << IC.getCost()
          << ", thres=" << (IC.getCostDelta() + IC.getCost())
          << ", Call: " << *CS.getInstruction() << "\n");
    return false;
  }

  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later. In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used. Thus we will always have the opportunity to make local inlining
  // decisions. Importantly the linkonce-ODR linkage covers inline functions
  // and templates in C++.
  //
  // FIXME: All of this logic should be sunk into getInlineCost. It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
  if (Caller->hasLocalLinkage() ||
      Caller->getLinkage() == GlobalValue::LinkOnceODRLinkage) {
    int TotalSecondaryCost = 0;
    // The candidate cost to be imposed upon the current function.
    int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
    // This bool tracks what happens if we do NOT inline C into B.
    bool callerWillBeRemoved = Caller->hasLocalLinkage();
    // This bool tracks what happens if we DO inline C into B.
    bool inliningPreventsSomeOuterInline = false;
    for (Value::use_iterator I = Caller->use_begin(), E = Caller->use_end();
         I != E; ++I) {
      CallSite CS2(*I);

      // If this isn't a call to Caller (it could be some other sort
      // of reference) skip it.  Such references will prevent the caller
      // from being removed.
      if (!CS2 || CS2.getCalledFunction() != Caller) {
        callerWillBeRemoved = false;
        continue;
      }

      InlineCost IC2 = getInlineCost(CS2);
      if (!IC2) {
        callerWillBeRemoved = false;
        continue;
      }
      if (IC2.isAlways())
        continue;

      // See if inlining the original callsite would erase the cost delta of
      // this callsite.  We subtract off the penalty for the call instruction,
      // which we would be deleting.
      if (IC2.getCostDelta() <= CandidateCost) {
        inliningPreventsSomeOuterInline = true;
        TotalSecondaryCost += IC2.getCost();
      }
    }
    // If all outer calls to Caller would get inlined, the cost for the last
    // one is set very low by getInlineCost, in anticipation that Caller will
    // be removed entirely.  We did not account for this above unless there
    // is only one caller of Caller.
    if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
      TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;

    if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
      DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction() <<
           " Cost = " << IC.getCost() <<
           ", outer Cost = " << TotalSecondaryCost << '\n');
      return false;
    }
  }

  DEBUG(dbgs() << "    Inlining: cost=" << IC.getCost()
        << ", thres=" << (IC.getCostDelta() + IC.getCost())
        << ", Call: " << *CS.getInstruction() << '\n');
  return true;
}

/// InlineHistoryIncludes - Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
            const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

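/// runOnSCC - Analyze the given SCC, inlining call sites that the policy deems
/// profitable and deleting callee function bodies that become dead in the
/// process.  Newly exposed call sites are revisited until no further change is
/// made.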
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  const TargetData *TD = getAnalysisIfAvailable<TargetData>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no call sites in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
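  // Note that after a swap the slot at index i holds a not-yet-examined call
  // site, so the loop backs up by one to revisit it.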
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);


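  // Both of these are shared across every call site we inline in this SCC:
  // InlinedArrayAllocas lets allocas from different inlined callees be merged,
  // and InlineInfo carries the call graph so InlineFunction keeps it updated.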
  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, TD);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction())) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Caller to the callee.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (Callee == 0 || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that its inline history does not already include the callee itself.
        // If it does, we'd be recursively inlining the same function, which
        // would produce the same call sites again and cause us to inline
        // infinitely.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS))
          continue;

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime))
          continue;
        ++NumInlined;

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG);
}

/// removeDeadFunctions - Remove functions from the call graph that have become
/// dead.  If AlwaysInlineOnly is set, only functions marked always-inline are
/// considered for removal.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // If we only care about always-inline functions, skip everything else.
    // This is a bit of a hack to share code between here and the InlineAlways
    // pass.
    if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the function's call graph node for removal; it is deleted below,
    // after we finish iterating over the call graph.
    FunctionsToRemove.push_back(CGN);
  }
  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that iterating over this list in a non-stable order is fine; it does
  // not matter in which order the functions are deleted.
  std::sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
  }
  return true;
}
567