InlineSimple.cpp revision 6dd196f762c934981ede17e197746b11426cd23a
//===- InlineSimple.cpp - Code to perform simple function inlining --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements bottom-up inlining of functions into callers.
//
//===----------------------------------------------------------------------===//

14#include "Inliner.h"
15#include "llvm/Instructions.h"
16#include "llvm/Function.h"
17#include "llvm/Type.h"
18#include "llvm/Support/CallSite.h"
19#include "llvm/Transforms/IPO.h"
20using namespace llvm;
21
22namespace {
23  struct ArgInfo {
24    unsigned ConstantWeight;
25    unsigned AllocaWeight;
26
27    ArgInfo(unsigned CWeight, unsigned AWeight)
28      : ConstantWeight(CWeight), AllocaWeight(AWeight) {}
29  };
30
31  // FunctionInfo - For each function, calculate the size of it in blocks and
32  // instructions.
33  struct FunctionInfo {
34    // NumInsts, NumBlocks - Keep track of how large each function is, which is
35    // used to estimate the code size cost of inlining it.
36    unsigned NumInsts, NumBlocks;
37
38    // ArgumentWeights - Each formal argument of the function is inspected to
39    // see if it is used in any contexts where making it a constant or alloca
40    // would reduce the code size.  If so, we add some value to the argument
41    // entry here.
42    std::vector<ArgInfo> ArgumentWeights;
43
44    FunctionInfo() : NumInsts(0), NumBlocks(0) {}
45  };
46
47  class SimpleInliner : public Inliner {
48    std::map<const Function*, FunctionInfo> CachedFunctionInfo;
49  public:
50    int getInlineCost(CallSite CS);
51  };
52  RegisterOpt<SimpleInliner> X("inline", "Function Integration/Inlining");
53}
54
55Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
56
57// CountCodeReductionForConstant - Figure out an approximation for how many
58// instructions will be constant folded if the specified value is constant.
59//
60static unsigned CountCodeReductionForConstant(Value *V) {
61  unsigned Reduction = 0;
62  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
63    if (isa<BranchInst>(*UI))
64      Reduction += 40;          // Eliminating a conditional branch is a big win
65    else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
66      // Eliminating a switch is a big win, proportional to the number of edges
67      // deleted.
68      Reduction += (SI->getNumSuccessors()-1) * 40;
69    else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
70      // Turning an indirect call into a direct call is a BIG win
71      Reduction += CI->getCalledValue() == V ? 500 : 0;
72    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
73      // Turning an indirect call into a direct call is a BIG win
74      Reduction += II->getCalledValue() == V ? 500 : 0;
75    } else {
76      // Figure out if this instruction will be removed due to simple constant
77      // propagation.
78      Instruction &Inst = cast<Instruction>(**UI);
79      bool AllOperandsConstant = true;
80      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
81        if (!isa<Constant>(Inst.getOperand(i)) &&
82            !isa<GlobalValue>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
83          AllOperandsConstant = false;
84          break;
85        }
86
87      if (AllOperandsConstant) {
88        // We will get to remove this instruction...
89        Reduction += 7;
90
91        // And any other instructions that use it which become constants
92        // themselves.
93        Reduction += CountCodeReductionForConstant(&Inst);
94      }
95    }
96
97  return Reduction;
98}
99
100// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
101// the function will be if it is inlined into a context where an argument
102// becomes an alloca.
103//
104static unsigned CountCodeReductionForAlloca(Value *V) {
105  if (!isa<PointerType>(V->getType())) return 0;  // Not a pointer
106  unsigned Reduction = 0;
107  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
108    Instruction *I = cast<Instruction>(*UI);
109    if (isa<LoadInst>(I) || isa<StoreInst>(I))
110      Reduction += 10;
111    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
112      // If the GEP has variable indices, we won't be able to do much with it.
113      for (Instruction::op_iterator I = GEP->op_begin()+1, E = GEP->op_end();
114           I != E; ++I)
115        if (!isa<Constant>(*I)) return 0;
116      Reduction += CountCodeReductionForAlloca(GEP)+15;
117    } else {
118      // If there is some other strange instruction, we're not going to be able
119      // to do much if we inline this.
120      return 0;
121    }
122  }
123
124  return Reduction;
125}
126
127// getInlineCost - The heuristic used to determine if we should inline the
128// function call or not.
129//
130int SimpleInliner::getInlineCost(CallSite CS) {
131  Instruction *TheCall = CS.getInstruction();
132  Function *Callee = CS.getCalledFunction();
133  const Function *Caller = TheCall->getParent()->getParent();
134
135  // Don't inline a directly recursive call.
136  if (Caller == Callee) return 2000000000;
137
138  // InlineCost - This value measures how good of an inline candidate this call
139  // site is to inline.  A lower inline cost make is more likely for the call to
140  // be inlined.  This value may go negative.
141  //
142  int InlineCost = 0;
143
144  // If there is only one call of the function, and it has internal linkage,
145  // make it almost guaranteed to be inlined.
146  //
147  if (Callee->hasInternalLinkage() && Callee->hasOneUse())
148    InlineCost -= 30000;
149
150  // Get information about the callee...
151  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
152
153  // If we haven't calculated this information yet...
154  if (CalleeFI.NumBlocks == 0) {
155    unsigned NumInsts = 0, NumBlocks = 0;
156
157    // Look at the size of the callee.  Each basic block counts as 20 units, and
158    // each instruction counts as 10.
159    for (Function::const_iterator BB = Callee->begin(), E = Callee->end();
160         BB != E; ++BB) {
161      NumInsts += BB->size();
162      NumBlocks++;
163    }
164
165    CalleeFI.NumBlocks = NumBlocks;
166    CalleeFI.NumInsts  = NumInsts;
167
168    // Check out all of the arguments to the function, figuring out how much
169    // code can be eliminated if one of the arguments is a constant.
170    std::vector<ArgInfo> &ArgWeights = CalleeFI.ArgumentWeights;
171
172    for (Function::aiterator I = Callee->abegin(), E = Callee->aend();
173         I != E; ++I)
174      ArgWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
175                                   CountCodeReductionForAlloca(I)));
176  }
177
178
179  // Add to the inline quality for properties that make the call valuable to
180  // inline.  This includes factors that indicate that the result of inlining
181  // the function will be optimizable.  Currently this just looks at arguments
182  // passed into the function.
183  //
184  unsigned ArgNo = 0;
185  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
186       I != E; ++I, ++ArgNo) {
187    // Each argument passed in has a cost at both the caller and the callee
188    // sides.  This favors functions that take many arguments over functions
189    // that take few arguments.
190    InlineCost -= 20;
191
192    // If this is a function being passed in, it is very likely that we will be
193    // able to turn an indirect function call into a direct function call.
194    if (isa<Function>(I))
195      InlineCost -= 100;
196
197    // If an alloca is passed in, inlining this function is likely to allow
198    // significant future optimization possibilities (like scalar promotion, and
199    // scalarization), so encourage the inlining of the function.
200    //
201    else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
202      if (ArgNo < CalleeFI.ArgumentWeights.size())
203        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
204
205    // If this is a constant being passed into the function, use the argument
206    // weights calculated for the callee to determine how much will be folded
207    // away with this information.
208    } else if (isa<Constant>(I) || isa<GlobalVariable>(I)) {
209      if (ArgNo < CalleeFI.ArgumentWeights.size())
210        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
211    }
212  }
213
214  // Now that we have considered all of the factors that make the call site more
215  // likely to be inlined, look at factors that make us not want to inline it.
216
217  // Don't inline into something too big, which would make it bigger.  Here, we
218  // count each basic block as a single unit.
219  //
220  // FIXME: THIS IS A TOTAL HACK.  The problem is that we don't keep track of
221  // which call sites are the result of an inlining operation.  Because of this,
222  // if we inline a recursive function into a callee, we will see a new call to
223  // the recursive function.  Every time we inline we get a new callsite for the
224  // function, which only stops when the caller reaches its inlining limit.
225  // Until the real problem is fixed, we apply this gnasty hack.
226  InlineCost += Caller->size();
227
228
229  // Look at the size of the callee.  Each basic block counts as 20 units, and
230  // each instruction counts as 5.
231  InlineCost += CalleeFI.NumInsts*5 + CalleeFI.NumBlocks*20;
232  return InlineCost;
233}
234
235