InlineSimple.cpp revision c71ca3cdd2d7a08b043ebb717cad0beadaf47450
//===- InlineSimple.cpp - Code to perform simple function inlining --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements bottom-up inlining of functions into their callers.
//
//===----------------------------------------------------------------------===//

#include "Inliner.h"
#include "llvm/CallingConv.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO.h"
using namespace llvm;

namespace {
  struct ArgInfo {
    unsigned ConstantWeight;
    unsigned AllocaWeight;

    ArgInfo(unsigned CWeight, unsigned AWeight)
      : ConstantWeight(CWeight), AllocaWeight(AWeight) {}
  };

  // FunctionInfo - For each function, cache its size in basic blocks and
  // instructions.
  struct FunctionInfo {
    // NumInsts, NumBlocks - Keep track of how large each function is, which is
    // used to estimate the code size cost of inlining it.
    unsigned NumInsts, NumBlocks;

    // ArgumentWeights - Each formal argument of the function is inspected to
    // see if it is used in any contexts where making it a constant or alloca
    // would reduce the code size.  If so, we add some value to the argument
    // entry here.
    std::vector<ArgInfo> ArgumentWeights;

    FunctionInfo() : NumInsts(0), NumBlocks(0) {}

    /// analyzeFunction - Fill in the current structure with information gleaned
    /// from the specified function.
    void analyzeFunction(Function *F);
  };

  class SimpleInliner : public Inliner {
    std::map<const Function*, FunctionInfo> CachedFunctionInfo;
  public:
    int getInlineCost(CallSite CS);
  };
  RegisterPass<SimpleInliner> X("inline", "Function Integration/Inlining");
}

Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
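
// Illustrative usage (a minimal sketch, not part of this file): clients
// normally schedule the inliner through a PassManager rather than
// instantiating SimpleInliner directly, e.g.
//
//   PassManager PM;
//   PM.add(createFunctionInliningPass());
//   PM.run(M);   // M is the Module being optimized
//
// The surrounding pass pipeline is up to the client; the lines above only
// show how the factory function declared in llvm/Transforms/IPO.h is
// intended to be called.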

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
//
static unsigned CountCodeReductionForConstant(Value *V) {
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (isa<BranchInst>(*UI))
      Reduction += 40;          // Eliminating a conditional branch is a big win
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
      // Eliminating a switch is a big win, proportional to the number of edges
      // deleted.
      Reduction += (SI->getNumSuccessors()-1) * 40;
    else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += CI->getCalledValue() == V ? 500 : 0;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += II->getCalledValue() == V ? 500 : 0;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(**UI);
      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += 7;

        // And any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }

  return Reduction;
}
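
// Illustrative example (commentary only, not derived from any particular
// program): if a formal argument is used as the condition of one conditional
// branch, as the callee of one indirect call, and as one operand of an 'add'
// whose other operand is already a constant, this routine estimates a
// reduction of roughly 40 + 500 + 7 = 547 units when the argument is known to
// be constant, plus whatever folds away transitively through the add.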

// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
//
static unsigned CountCodeReductionForAlloca(Value *V) {
  if (!isa<PointerType>(V->getType())) return 0;  // Not a pointer
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) {
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += 10;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      for (Instruction::op_iterator OI = GEP->op_begin()+1, OE = GEP->op_end();
           OI != OE; ++OI)
        if (!isa<Constant>(*OI)) return 0;
      Reduction += CountCodeReductionForAlloca(GEP)+15;
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}
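
// Illustrative example (commentary only): an argument pointer that is read by
// one load, written by one store, and indexed by a single constant-index GEP
// whose result feeds another load would score roughly 10 + 10 + (15 + 10) = 45
// units here.  Any use other than loads, stores, and constant-index GEPs
// collapses the whole estimate to zero.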

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void FunctionInfo::analyzeFunction(Function *F) {
  unsigned NumInsts = 0, NumBlocks = 0;

  // Count the instructions and basic blocks in the callee, skipping
  // instructions that are likely to be free after inlining (debug intrinsics,
  // no-op casts, and constant-index GEPs).  The per-instruction and per-block
  // weights are applied later, in getInlineCost.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
         II != E; ++II) {
      if (isa<DbgInfoIntrinsic>(II)) continue;  // Debug intrinsics don't count.

      // Noop casts, including ptr <-> int, don't count.
      if (const CastInst *CI = dyn_cast<CastInst>(II)) {
        if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
            isa<PtrToIntInst>(CI))
          continue;
      } else if (const GetElementPtrInst *GEPI =
                         dyn_cast<GetElementPtrInst>(II)) {
        // If a GEP has all constant indices, it will probably be folded with
        // a load/store.
        bool AllConstant = true;
        for (unsigned i = 1, e = GEPI->getNumOperands(); i != e; ++i)
          if (!isa<ConstantInt>(GEPI->getOperand(i))) {
            AllConstant = false;
            break;
          }
        if (AllConstant) continue;
      }

      ++NumInsts;
    }

    ++NumBlocks;
  }

  this->NumBlocks = NumBlocks;
  this->NumInsts  = NumInsts;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}
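
// Note (illustrative, not exhaustive): for a callee consisting of a single
// block containing an llvm.dbg intrinsic, a pointer-to-pointer bitcast, a
// constant-index GEP, one load, and a ret, analyzeFunction would record
// NumBlocks = 1 and NumInsts = 2; only the load and the ret are counted, since
// the other instructions are expected to be folded away or be free.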


// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
int SimpleInliner::getInlineCost(CallSite CS) {
  Instruction *TheCall = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  const Function *Caller = TheCall->getParent()->getParent();

  // Don't inline a directly recursive call.
  if (Caller == Callee) return 2000000000;

  // InlineCost - This value measures how good an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasInternalLinkage() && Callee->hasOneUse())
    InlineCost -= 30000;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += 2000;

  // If the instruction after the call, or the normal destination of the
  // invoke, is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += 10000;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += 10000;

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  This favors functions that take many arguments over functions
    // that take few arguments.
    InlineCost -= 20;

    // If this is a function being passed in, it is very likely that we will be
    // able to turn an indirect function call into a direct function call.
    if (isa<Function>(I))
      InlineCost -= 100;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion and
    // scalarization), so encourage the inlining of the function.
    //
    else if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
    }
  }

  // Now that we have considered all of the factors that make the call site more
  // likely to be inlined, look at factors that make us not want to inline it.

  // Don't inline into something too big, which would make it bigger.  Here, we
  // count each basic block as a single unit.
  //
  InlineCost += Caller->size()/20;

  // Look at the size of the callee.  Each basic block counts as 20 units, and
  // each instruction counts as 5.
  InlineCost += CalleeFI.NumInsts*5 + CalleeFI.NumBlocks*20;
  return InlineCost;
}
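
// Worked example (illustrative numbers only, not taken from any real program):
// suppose a call site passes one constant argument to a callee with internal
// linkage and exactly one use, where analyzeFunction found NumInsts = 10,
// NumBlocks = 2, and ConstantWeight = 47 for that argument, and the caller has
// 40 basic blocks.  The cost would be roughly
//   -30000 (single internal use) - 20 (per-argument bonus) - 47 (constant
//   weight) + 2 (caller size 40/20) + 10*5 + 2*20 (callee size) = -29975,
// which is far below any reasonable threshold, so the call would be inlined.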