InlineCost.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const DL;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
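
  // Illustrative example (not from the original source): if a call passes
  // "&buf[4]" for a caller-local "char buf[16]", the formal argument is
  // recorded in ConstantOffsetPtrs as the pair (buf, 4), and any in-bounds
  // GEP off that argument extends the constant offset instead of being
  // charged as real work.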

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
        FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
        NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
        NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
        NumInstructionsSimplified(0), SROACostSavings(0),
        SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace
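
// Illustrative usage sketch (not part of the original source): each call site
// gets its own analyzer, and the inliner consumes the resulting cost and
// threshold pair, e.g.:
//
//   CallAnalyzer CA(Callee->getDataLayout(), *TTI, *Callee, Threshold);
//   bool Viable = CA.analyzeCall(CS);
//   // On success, CA.getCost() and CA.getThreshold() drive the decision;
//   // InlineCostAnalysis::getInlineCost() near the bottom of this file wraps
//   // exactly this pattern.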

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Look up the SROA-candidate argument and cost iterator which V maps
/// to. Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}
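
// Worked example (illustrative, not from the original source): suppose an
// argument maps to a caller alloca. Each simple load or store through it
// calls accumulateSROACost(CostIt, InlineConstants::InstrCost), treating the
// instruction as free-if-SROA-fires while remembering the savings. If a later
// use escapes the pointer (say, a volatile access or an unanalyzable user),
// disableSROA() adds every remembered saving back into Cost at once.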

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!DL)
    return false;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
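
// Worked example (illustrative, not from the original source): for
//   %p = getelementptr inbounds { i32, i64 }* %base, i32 1, i32 1
// on a typical 64-bit layout, the first index advances by one whole struct
// (16 bytes with padding) and the second adds the field offset of element 1
// (8 bytes), so Offset accumulates 24. Any non-constant index makes this
// return false and the GEP is treated as real work.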

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
                      Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (DL && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}
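
// Illustrative note (not from the original source): the combined effect of
// the two visitors above is that, for an argument known to be alloca-derived,
// a chain like
//   %c = bitcast i8* %arg to i32*
//   %g = getelementptr inbounds i32* %c, i64 2
// stays free and keeps %g registered as a SROA candidate, whereas a GEP with
// a runtime index both costs an instruction and forfeits the SROA savings.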

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp)
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}
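
// Illustrative example (not from the original source): the base/offset
// round-trip above means a sequence such as
//   %i = ptrtoint i8* %p to i64
//   %q = inttoptr i64 %i to i8*
// keeps %q associated with %p's (base, offset) pair, so later comparisons or
// pointer differences against %p can still fold to constants.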

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS,
                                                 CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
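
// Worked example (illustrative, not from the original source): if both
// operands trace to the same base with constant offsets, say LHS = (buf, 12)
// and RHS = (buf, 4), the icmp/sub paths above fold to constants -- here the
// pointer difference becomes the integer 8 and costs nothing after inlining.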

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}
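
// Illustrative example (not from the original source): with a constant
// argument, SimplifyBinOp lets arithmetic on it fold away entirely. If the
// call passes 3 for %n, then
//   %x = shl i32 %n, 2
// simplifies to the constant 12, is recorded in SimplifiedValues, and adds
// nothing to Cost; downstream users of %x see the constant too.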

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}
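
// Illustrative example (not from the original source): if earlier
// simplification mapped the called value to a concrete function @impl (as
// happens during devirtualization), the nested CallAnalyzer above "pretends"
// to inline @impl under IndirectCallThreshold. If that succeeds with, say,
// cost 40, this call site receives a credit of IndirectCallThreshold - 40
// toward its own cost, rewarding likely devirtualization without assuming it.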

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  return isa<ConstantInt>(SI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(SI.getCondition()));
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably don't
  // want to inline this function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If the visit of this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!DL || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = DL->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
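
// Illustrative example (not from the original source): given a call argument
// computed as
//   %a = alloca [4 x i32]
//   %p = getelementptr inbounds [4 x i32]* %a, i32 0, i32 2
//   %q = bitcast i32* %p to i8*
// the stripping loop above walks %q back to %a and returns the constant 8
// (two i32 elements), while rewriting V to point at the base alloca.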

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (DL && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }
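
  // Worked example (illustrative, not from the original source): a byval
  // struct of 200 bits with 64-bit pointers gives ceil(200/64) = 4 words, so
  // the loop above credits 2 * 4 * InstrCost (one load plus one store per
  // word); a 1024-bit struct would clamp at the 8-store memcpy bound.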

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or the normal destination of the
  // invoke, is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
          HasIndirectBr)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    TerminatorInst *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }
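
  // Illustrative example (not from the original source): if the call passes
  // the constant true for a flag argument, a conditional branch on that flag
  // inserts only its taken successor above, so the untaken block (and
  // everything reachable only through it) never contributes to Cost.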

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
  DEBUG_PRINT_STAT(VectorBonus);
#undef DEBUG_PRINT_STAT
}
#endif

INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost",
                      "Inline Cost Analysis", true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost",
                    "Inline Cost Analysis", true, true)

char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

/// \brief Test whether two functions either both have or both lack the given
/// attribute.
static bool attributeMatches(Function *F1, Function *F2,
                             Attribute::AttrKind Attr) {
  return F1->hasFnAttribute(Attr) == F2->hasFnAttribute(Attr);
}

/// \brief Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->hasFnAttribute(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(Callee->getDataLayout(), *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}