// InlineCost.cpp revision 13086a658ae06046ded902229f9918b8bad505bd
//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

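  // An illustrative walk-through of the maps above (hypothetical IR, not
  // analysis state): if the call site passes the constant 42 for an i32
  // argument %n, SimplifiedValues maps %n to 42; a later "icmp eq i32 %n, 0"
  // in the callee then folds to false, which can prove a successor block
  // dead and sharply reduce the estimated cost of inlining.
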
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

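    // Illustrative example (assuming a 64-bit DataLayout): for
    //   getelementptr inbounds {i32, double}* %p, i32 0, i32 1
    // the struct case below contributes SL->getElementOffset(1), typically 8
    // given double's alignment, while an array index such as "i64 3" into an
    // i32 array contributes 3 * 4 = 12 via the sequential case.
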
    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                           Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

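// Illustrative example of the GEP handling above (hypothetical IR):
//   getelementptr inbounds [4 x i32]* %a, i64 0, i64 2
// has all-constant indices, so it is modeled as free, and if %a maps back to
// a caller alloca it remains a viable SROA candidate.
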
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

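  // Illustrative example (hypothetical IR): for "%i = ptrtoint i8* %p to i64"
  // followed by "%q = inttoptr i64 %i to i8*", the base+offset pair recorded
  // for %p carries over to %q, so pointer arithmetic done in integer space
  // can still fold in later comparisons and subtractions.
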
  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

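  // Illustrative example (hypothetical IR): "icmp eq i8* %arg, null" where
  // %arg is derived from a caller alloca folds to false above, since a
  // well-defined alloca is never null.
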
  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

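      // Illustrative arithmetic (assuming the usual InstrCost of 5 and
      // CallPenalty of 25 from InlineConstants): a lowered call with three
      // arguments adds 3 * 5 = 15 here plus the call penalty below, for a
      // total of 40.
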
      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}


/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

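    // Illustrative arithmetic: in a 100-instruction block where 40
    // instructions simplify away, only the remaining 60 accrue InstrCost
    // against the threshold.
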
    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

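  // Illustrative arithmetic (assuming the stock inline threshold of 225): the
  // single-BB bonus above raises the working threshold to 337 until a second
  // live basic block is seen.
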
  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

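  // Illustrative example (hypothetical IR): for a call "foo(i32 42, i8* %buf)"
  // where %buf is an alloca in the caller, the loop below records the constant
  // 42 for the first formal argument and marks %buf's alloca as a SROA
  // candidate with an initial cost saving of zero.
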
  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, while prioritizing small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddress's (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

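    // Illustrative example of the worklist pruning later in this loop: if an
    // argument-derived branch condition folded to a constant via
    // SimplifiedValues, only the taken successor is enqueued, so provably
    // dead blocks never contribute cost.
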
    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TD = getAnalysisIfAvailable<DataLayout>();
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
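  // Illustrative example: an always_inline function that calls setjmp (a
  // returns-twice function) without itself being marked returns_twice is
  // rejected by the loop below.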
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}